gpt4 book ai didi

c++ - OpenCV多线程给出错误

转载 作者:行者123 更新时间:2023-11-28 05:13:36 27 4
gpt4 key购买 nike

我正在以下列方式在多个线程上运行 OpenCV 代码:

// NOTE(review): runOnSingleCamera calls HighGUI functions (namedWindow,
// imshow, waitKey). The GTK backend of HighGUI is not thread-safe, so
// creating/updating windows from four worker threads at once races inside
// GTK -- a likely cause of the intermittent segfaults reported below.
// Window creation and display should be moved to the main thread.
std::thread t1(runOnSingleCamera, alphaFile, featureToUse, classifier,0);
std::thread t2(runOnSingleCamera, betaFile, featureToUse, classifier,1);
std::thread t3(runOnSingleCamera, gammaFile, featureToUse, classifier,2);
std::thread t4(runOnSingleCamera, deltaFile, featureToUse, classifier,3);
t1.join();
t2.join();
t3.join();
t4.join();

这编译很好,但是当我运行它时,我遇到了各种错误,它甚至偶尔能工作......

这是我遇到的一些错误的示例:

tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
Segmentation fault (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed

(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed
Segmentation fault (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1

(alphaInput.webm:8593): GLib-GObject-WARNING **: invalid cast from 'CvImageWidget' to 'CvImageWidget'

** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_size_allocate(GtkWidget*, GtkAllocation*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed

** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_realize(GtkWidget*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed
**
Gtk:ERROR:/build/gtk+2.0-KsZKkB/gtk+2.0-2.24.30/gtk/gtkwidget.c:8861:gtk_widget_real_map: assertion failed: (gtk_widget_get_realized (widget))
Aborted (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
/usr/share/themes/Ambiance/gtk-2.0/gtkrc:720: Unable to find include file: "apps/ff.rc"

(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'

(betaInput.webm:8615): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed
Segmentation fault (core dumped)

有没有人以前见过这个/知道出了什么问题以及如何解决?

使用 gdb 运行给出以下结果:

Thread 4 "main" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fffdb7fe700 (LWP 29317)]
0x0000000000000000 in ?? ()

截至提问时,我使用的是最新版本的 Ubuntu 以及最新版本的 OpenCV。

下面是按要求提供的完整代码,它很长并且由多个元素组成,我想问题可能出现在早期或声明中,或者可能与 imshow 函数不兼容:

// Processes one video stream end-to-end: MoG background subtraction,
// person detection (HOG or Haar cascade) inside moving regions, feature
// extraction (Hu moments / intensity histogram / HOG / correlogram) and
// re-identification of detections against the global 'targets' list via
// Kalman prediction and Mahalanobis/L1 distance.
//
// file         - video file to open
// featureToUse - 1=Hu moments, 2=intensity histogram, 3=HOG, 4=correlogram,
//                5=optical flow (not implemented)
// classifier   - 1=HOG people detector, otherwise Haar cascade
// cameraID     - camera index; camera 3 uses smaller detector windows
//
// Returns 0 on normal completion (end of video or user exit),
// -1 if the capture could not be opened.
//
// NOTE(review): this function runs on several std::threads concurrently, yet
// it (a) calls namedWindow/imshow/waitKey -- HighGUI's GTK backend is not
// thread-safe, so all window handling should live on the main thread -- and
// (b) reads and writes the global 'targets' vector without any mutex, which
// is a data race. Both need fixing at the call-site/file level; they are the
// probable cause of the intermittent GTK errors and segfaults.
int runOnSingleCamera(String file, int featureToUse, int classifier, int cameraID)
{
    // frame counter, used as the time step passed to the Kalman filters
    int timeSteps = 0;

    string windowName = file; // window name

    Mat img, outputImage, foreground; // image objects
    VideoCapture cap;

    bool keepProcessing = true;  // loop control flag
    unsigned char key;           // user input
    int EVENT_LOOP_DELAY = 40;   // GUI delay: 1000ms/25fps = 40ms per frame

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    int width = 40;      // minimum bounding-box width worth detecting in
    int height = 100;    // minimum bounding-box height worth detecting in
    int learning = 1000; // MoG learning-rate divisor (rate = 1/learning)
    int padding = 40;    // bounding-box enlargement, percent per side

    // if the file can be opened, process it; otherwise fall through to -1
    if((cap.open(file) == true))
    {
        // create window object (use flag=0 to allow resize, 1 to auto fix size)
        // NOTE(review): must be called from the main thread -- see header note
        namedWindow(windowName, 1);

        // create background / foreground Mixture of Gaussian (MoG) model
        Ptr<BackgroundSubtractorMOG2> MoG = createBackgroundSubtractorMOG2(500,25,false);

        HOGDescriptor hog;
        hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

        CascadeClassifier cascade = CascadeClassifier(CASCADE_TO_USE);

        Ptr<SuperpixelSEEDS> seeds;

        // start main loop
        while(keepProcessing)
        {
            int64 timeStart = getTickCount();

            if (cap.isOpened())
            {
                cap >> img;

                if(img.empty())
                {
                    std::cerr << "End of video file reached" << std::endl;
                    // FIX: was exit(0) -- exiting from a worker thread tears
                    // the whole process down while sibling camera threads are
                    // still running; return from this thread instead.
                    return 0;
                }
                outputImage = img.clone();

                cvtColor(img, img, CV_BGR2GRAY);
            }
            else
            {
                // if not a capture object set event delay to zero so it waits
                // indefinitely (as single image file, no need to loop)
                EVENT_LOOP_DELAY = 0;
            }

            // update background model and get background/foreground
            MoG->apply(img, foreground, (double)(1.0/learning));

            //imshow("old foreground", foreground);

            /////////////////////////////////////////////////////////////////////////////////SUPERPIXELS
            int useSuperpixels = 0;

            if(useSuperpixels == 1)
            {
                Mat seedMask, labels, result;

                result = img.clone();

                int width = img.size().width;
                int height = img.size().height;

                seeds = createSuperpixelSEEDS(width, height, 1, 2000, 10, 2, 5, true);

                seeds->iterate(img, 10);

                seeds->getLabels(labels);

                // per-superpixel foreground-pixel count and total pixel count
                vector<int> counter(seeds->getNumberOfSuperpixels(),0);
                vector<int> numberOfPixelsPerSuperpixel(seeds->getNumberOfSuperpixels(),0);

                vector<bool> useSuperpixel(seeds->getNumberOfSuperpixels(),false);

                for(int i = 0; i<foreground.rows; i++)
                {
                    for(int j = 0; j<foreground.cols; j++)
                    {
                        numberOfPixelsPerSuperpixel[labels.at<int>(i,j)] += 1;
                        if(foreground.at<unsigned char>(i,j)==255)
                        {
                            counter[labels.at<int>(i,j)] += 1;
                        }
                    }
                }

                for(size_t i = 0; i<counter.size(); i++)
                {
                    // FIX: the original compared the INTEGER quotient
                    // counter[i]/numberOfPixelsPerSuperpixel[i] against
                    // 0.0001, which is 0 unless the superpixel was entirely
                    // foreground; compute the ratio in floating point and
                    // guard against empty superpixels (division by zero).
                    if(numberOfPixelsPerSuperpixel[i] > 0 &&
                       (double)counter[i]/(double)numberOfPixelsPerSuperpixel[i] > 0.0001)
                    {
                        useSuperpixel[i] = true;
                    }
                }

                // binarise the foreground mask per accepted superpixel
                for(int i = 0; i<foreground.rows; i++)
                {
                    for(int j = 0; j<foreground.cols; j++)
                    {
                        if(useSuperpixel[labels.at<int>(i,j)] == true)
                        {
                            foreground.at<unsigned char>(i,j) = 255;
                        }
                        else
                        {
                            foreground.at<unsigned char>(i,j) = 0;
                        }
                    }
                }
            }
            /////////////////////////////////////////////////////////////////////////////////
            else
            {
                // perform erosion - removes boundaries of foreground object
                erode(foreground, foreground, Mat(),Point(),1);

                // perform morphological closing
                dilate(foreground, foreground, Mat(),Point(),5);
                erode(foreground, foreground, Mat(),Point(),1);
            }
            //imshow("foreground", foreground);

            // get connected components from the foreground
            findContours(foreground, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

            // iterate through all the top-level contours and get bounding
            // rectangles for them (if larger than given value).
            // FIX: the original entered this loop even when no contours were
            // found, indexing contours[0]/hierarchy[0] on empty vectors (UB,
            // a likely source of the observed segfaults).
            for(int idx = 0; idx >= 0 && !contours.empty(); idx = hierarchy[idx][0])
            {
                Rect r = boundingRect(contours[idx]);

                // adjust bounding rectangle to be padding% larger
                // around the object
                r.x = max(0, r.x - (int) (padding/100.0 * (double) r.width));
                r.y = max(0, r.y - (int) (padding/100.0 * (double) r.height));

                r.width = min(img.cols - 1, (r.width + 2 * (int) (padding/100.0 * (double) r.width)));
                r.height = min(img.rows - 1, (r.height + 2 * (int) (padding/100.0 * (double) r.height)));

                // draw rectangle if greater than width/height constraints and if
                // also still inside image
                if ((r.width >= width) && (r.height >= height) && (r.x + r.width < img.cols) && (r.y + r.height < img.rows))
                {
                    vector<Rect> found, found_filtered;

                    Mat roi = outputImage(r);

                    if (classifier == 1)
                    {
                        // changing last parameter helps deal with multiple
                        // rectangles per person
                        if (cameraID == 3)
                        {
                            hog.detectMultiScale(roi, found, 0, Size(8,8), Size(32,32), 1.05, 5);
                        }
                        else
                        {
                            hog.detectMultiScale(roi, found, 0, Size(8,8), Size(64,64), 1.05, 5);
                        }
                    }
                    else
                    {
                        if (cameraID == 3)
                        {
                            cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(32,32));
                        }
                        else
                        {
                            cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(64,64));
                        }
                    }

                    for(size_t i = 0; i < found.size(); i++ )
                    {
                        Rect rec = found[i];

                        // detector coordinates are relative to the ROI --
                        // translate back into full-image coordinates
                        rec.x += r.x;
                        rec.y += r.y;

                        size_t j;
                        // Do not add small detections inside a bigger detection.
                        for ( j = 0; j < found.size(); j++ )
                        {
                            if ( j != i && (rec & found[j]) == rec )
                            {
                                break;
                            }
                        }

                        if (j == found.size())
                        {
                            found_filtered.push_back(rec);
                        }
                    }
                    for (size_t i = 0; i < found_filtered.size(); i++)
                    {
                        Rect rec = found_filtered[i];

                        // The HOG/Cascade detector returns slightly larger rectangles than the real objects,
                        // so we slightly shrink the rectangles to get a nicer output.
                        rec.x += rec.width*0.1;
                        rec.width = rec.width*0.8;
                        rec.y += rec.height*0.1;
                        rec.height = rec.height*0.8;
                        // rectangle(img, rec.tl(), rec.br(), cv::Scalar(0,255,0), 3);

                        Point2f center = Point2f(float(rec.x + rec.width/2.0), float(rec.y + rec.height/2.0));

                        Mat regionOfInterest;

                        Mat regionOfInterestOriginal = img(rec);
                        //Mat regionOfInterestOriginal = img(r);

                        Mat regionOfInterestForeground = foreground(rec);
                        //Mat regionOfInterestForeground = foreground(r);

                        // mask the grayscale ROI by the 0/255 foreground mask
                        bitwise_and(regionOfInterestOriginal, regionOfInterestForeground, regionOfInterest);

                        Mat clone = regionOfInterest.clone();

                        // FIX: CV_INTER_CUBIC was being passed as the 'fx'
                        // scale argument, not the interpolation flag (which
                        // silently stayed INTER_LINEAR); fx/fy must be 0 when
                        // a destination size is given.
                        resize(clone, regionOfInterest, Size(64,128), 0, 0, CV_INTER_CUBIC);

                        // NOTE(review): imshow from a worker thread -- move to
                        // the main thread along with the other GUI calls
                        imshow("roi", regionOfInterest);

                        double huMoments[7];
                        vector<double> hu(7);
                        Mat hist;
                        vector<float> descriptorsValues;

                        Mat feature;

                        if(featureToUse == 1) //HuMoments
                        {
                            vector<vector<Point> > contoursHu;
                            vector<Vec4i> hierarchyHu;

                            findContours(regionOfInterest, contoursHu, hierarchyHu, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

                            // FIX: largestSize and largestContour were read
                            // uninitialised (UB); initialise them, and skip
                            // this detection if the ROI has no contours at all
                            // (moments(contoursHu[garbage]) would crash).
                            double largestSize = 0.0, size = 0.0;
                            int largestContour = -1;

                            for(size_t i = 0; i < contoursHu.size(); i++)
                            {
                                size = (double)contoursHu[i].size();

                                if(size > largestSize)
                                {
                                    largestSize = size;
                                    largestContour = (int)i;
                                }
                            }

                            if(largestContour < 0)
                            {
                                continue; // nothing to describe in this ROI
                            }

                            Moments contourMoments;

                            contourMoments = moments(contoursHu[largestContour]);

                            HuMoments(contourMoments, huMoments);

                            hu.assign(huMoments,huMoments+7);

                            feature = Mat(hu);
                            feature = feature.t();
                        }
                        else if(featureToUse == 2) //HistogramOfIntensities
                        {
                            int histSize = 16; // bin size - need to determine which pixel threshold to use
                            float range[] = {0,255};
                            const float *ranges[] = {range};
                            int channels[] = {0, 1};

                            calcHist(&regionOfInterest, 1, channels, Mat(), hist, 1, &histSize, ranges, true, false);

                            feature = hist.clone();
                            feature = feature.t();
                        }

                        else if(featureToUse == 3) //HOG
                        {
                            //play with these parameters to change HOG size
                            cv::HOGDescriptor descriptor(Size(64, 128), Size(16, 16), Size(16, 16), Size(16, 16), 4, -1, 0.2, true, 64);

                            descriptor.compute(regionOfInterest, descriptorsValues);

                            feature = Mat(descriptorsValues);
                            feature = feature.t();
                        }

                        else if(featureToUse == 4) //Correlogram
                        {
                            // FIX: Mat(rows,cols,type) leaves the data
                            // UNINITIALISED, yet both matrices were used as
                            // accumulators -- start from zeros. Also use a
                            // 32-bit count matrix: the 8-bit 'occurances'
                            // overflowed after 255 pixel pairs per bin.
                            Mat correlogram = Mat::zeros(8,8,CV_64F);
                            Mat occurances = Mat::zeros(8,8,CV_32S);

                            int xIntensity, yIntensity;

                            // accumulate summed distances and pair counts per
                            // (intensity/32, intensity/32) bin pair
                            for(int i = 0; i<regionOfInterest.rows; i++)
                            {
                                for(int j = 0; j<regionOfInterest.cols; j++)
                                {
                                    xIntensity = floor(regionOfInterest.at<unsigned char>(i,j)/32);

                                    for(int k = i; k<regionOfInterest.rows; k++)
                                    {
                                        for(int l = 0; l<regionOfInterest.cols; l++)
                                        {
                                            // only pairs strictly after (i,j)
                                            // in scan order, so each pair is
                                            // counted once per direction
                                            if((k == i && l > j) || k > i)
                                            {
                                                yIntensity = floor(regionOfInterest.at<unsigned char>(k,l)/32);

                                                correlogram.at<double>(xIntensity,yIntensity) += (norm(Point(i,j)-Point(k,l)));
                                                correlogram.at<double>(yIntensity,xIntensity) += (norm(Point(i,j)-Point(k,l)));

                                                occurances.at<int>(xIntensity,yIntensity) += 1;
                                                occurances.at<int>(yIntensity,xIntensity) += 1;
                                            }
                                        }
                                    }
                                }
                            }
                            // average it out
                            // FIX: the original ASSIGNED the raw pair counts
                            // over the summed distances here, discarding the
                            // distances entirely; "average" means dividing
                            // the summed distance by the pair count.
                            for(int i = 0; i<correlogram.rows; i++)
                            {
                                for(int j = 0; j<correlogram.cols; j++)
                                {
                                    int count = occurances.at<int>(i,j);
                                    if(count > 0)
                                    {
                                        correlogram.at<double>(i,j) /= (double)count;
                                    }
                                }
                            }

                            feature = correlogram.reshape(1,1);
                        }
                        else if(featureToUse == 5) //Flow
                        {
                            // not implemented
                        }

                        feature.convertTo(feature, CV_64F);

                        normalize(feature, feature, 1, 0, NORM_L1, -1, Mat());
                        cout << "New Feature" << endl << feature << endl;

                        // classify first target
                        // NOTE(review): 'targets' is a global shared by all
                        // camera threads -- every access below needs a mutex
                        if(targets.size() == 0) //if first target found
                        {
                            Person person(0, center.x, center.y, timeSteps, rec.width, rec.height);

                            person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);

                            Rect p = person.kalmanPredict();

                            person.updateFeatures(feature);

                            person.setCurrentCamera(cameraID);

                            rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);

                            char str[200];
                            sprintf(str,"Person %d",person.getIdentifier());

                            // FIX: "(0,0,0)" was the comma operator (== 0),
                            // not a colour -- spell the Scalar out explicitly
                            putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,0));

                            targets.push_back(person);
                        }
                        else
                        {
                            vector<double> mDistances;
                            bool singleEntry = false;

                            // Mahalanobis needs a usable covariance; if any
                            // target has only one feature row fall back to L1
                            for(int i = 0; i<targets.size(); i++)
                            {
                                if(targets[i].getFeatures().rows == 1)
                                {
                                    singleEntry = true;
                                }
                            }

                            for(int i = 0; i<targets.size(); i++)
                            {
                                Mat covar, mean;
                                Mat data = targets[i].getFeatures();

                                calcCovarMatrix(data,covar,mean,CV_COVAR_NORMAL|CV_COVAR_ROWS);

                                // cout << i << " data" << endl << data << endl;

                                // cout << i << " Covar" << endl << covar << endl;

                                // cout << i << " mean" << endl << mean << endl;

                                double mDistance;

                                if(singleEntry == false)
                                {
                                    Mat invCovar;

                                    invert(covar,invCovar,DECOMP_SVD);

                                    mDistance = Mahalanobis(feature,mean,invCovar);

                                    cout << i << " Mahalanobis Distance" << endl << mDistance << endl;
                                }
                                else
                                {
                                    mDistance = norm(feature,mean,NORM_L1);

                                    cout << i << " Norm Distance" << endl << mDistance << endl;
                                }
                                mDistances.push_back(mDistance);
                            }

                            Mat test = Mat(mDistances);
                            cout << "Distances" << endl << test << endl;

                            // invert distances into similarity weights, then
                            // L1-normalise them into pseudo-probabilities
                            double sum = 0.0;
                            for(int i = 0; i<mDistances.size(); i++)
                            {
                                sum += mDistances[i];
                            }
                            for(int i = 0; i<mDistances.size(); i++)
                            {
                                mDistances[i] = sum/mDistances[i];
                            }

                            normalize(mDistances,mDistances,1,0,NORM_L1,-1,Mat());

                            Mat probabilities = Mat(mDistances);

                            cout << "Probabilities" << endl << probabilities << endl;

                            //special case to classify second target
                            if(targets.size() == 1)
                            {
                                // within 100px of the only known target ->
                                // same person, otherwise a new one
                                if(fabs(center.x-targets[0].getLastPosition().x)<100 and fabs(center.y-targets[0].getLastPosition().y)<100)
                                {
                                    targets[0].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);

                                    Rect p = targets[0].kalmanPredict();

                                    targets[0].updateFeatures(feature);

                                    targets[0].setCurrentCamera(cameraID);

                                    rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);

                                    char str[200];
                                    sprintf(str,"Person %d",targets[0].getIdentifier());

                                    putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,0));
                                }
                                else
                                {
                                    Person person(1, center.x, center.y, timeSteps, rec.width, rec.height);

                                    person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);

                                    Rect p = person.kalmanPredict();

                                    person.updateFeatures(feature);

                                    person.setCurrentCamera(cameraID);

                                    rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);

                                    char str[200];
                                    sprintf(str,"Person %d",person.getIdentifier());

                                    putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,0));

                                    targets.push_back(person);
                                }
                            }

                            else
                            {
                                double greatestProbability = 0.0;
                                int identifier = 0;

                                double min, max;
                                Point min_loc, max_loc;
                                minMaxLoc(probabilities, &min, &max, &min_loc, &max_loc);

                                greatestProbability = max;
                                identifier = max_loc.y; // row index == target index

                                cout << greatestProbability << " at " << identifier << endl;

                                if(greatestProbability >= 0.5)
                                {
                                    // confident match: update the best target
                                    targets[identifier].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);

                                    Rect p = targets[identifier].kalmanPredict();

                                    targets[identifier].updateFeatures(feature);

                                    targets[identifier].setCurrentCamera(cameraID);

                                    rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);

                                    char str[200];
                                    sprintf(str,"Person %d",targets[identifier].getIdentifier());

                                    putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,0));
                                }
                                else
                                {
                                    // no confident match: register a new person
                                    int identifier = targets.size();
                                    Person person(identifier, center.x, center.y, timeSteps, rec.width, rec.height);

                                    person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);

                                    Rect p = person.kalmanPredict();

                                    person.updateFeatures(feature);

                                    person.setCurrentCamera(cameraID);

                                    rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);

                                    char str[200];
                                    sprintf(str,"Person %d",person.getIdentifier());

                                    putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,0));

                                    targets.push_back(person);
                                }
                            }
                        }
                    }
                    rectangle(outputImage, r, Scalar(0,0,255), 2, 8, 0);
                }
            }
            // display image in window
            // NOTE(review): must run on the main thread -- see header note
            imshow(windowName, outputImage);

            // keep a steady frame rate: subtract processing time from the
            // per-frame delay, never waiting less than 2ms
            key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - (((getTickCount() - timeStart) / getTickFrequency())*1000)));

            if (key == 'x')
            {
                // if user presses "x" then exit
                std::cout << "Keyboard exit requested : exiting now - bye!" << std::endl;
                keepProcessing = false;
            }
            timeSteps += 1;
        }
        // the camera will be deinitialized automatically in VideoCapture destructor
        // all OK : main returns 0
        return 0;
    }
    // not OK : main returns -1
    return -1;
}

最佳答案

这是因为您误用了 OpenCV 中某些不支持在多线程环境下运行的功能,或者您没有正确使用互斥锁、监视器等同步机制,把代码中访问关键数据的部分限制为一次只允许一个线程进入。除非您分享更多代码,否则我们无法确切指出您哪里做错了。从您的日志来看,某些初始化过程似乎被执行了不止一次。


从您的代码和日志中,我想到了两件事:

  1. 您是否不小心尝试在多个线程上访问视频捕获硬件?
  2. 也许创建一个新窗口会导致 GTK 中某些东西的初始化。尝试在主线程上创建窗口,看看是否有帮助。请注意,无论是否使用 OpenCV,为您的 UI 使用多个线程都不是一个好主意。

如果这些都没有帮助,请尝试向您的代码添加一些日志输出,以便我们确定是哪一行导致了错误。


所以它毕竟是#2。要修复它,您必须将所有 namedWindow 调用移至主线程。之后,如果 imshow 调用仍然失败,您也必须将其移至主线程。每个线程都需要一个条件变量,线程写入的全局变量和主线程用来更新窗口的全局变量。我会提供代码,但我对 C++ 并发了解不多。您可以在此处阅读有关此任务的更多信息:waiting thread until a condition has been occurred

关于c++ - OpenCV多线程给出错误,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/43094972/

27 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com