gpt4 book ai didi

c++ - 眨眼检测 OpenCV C++

转载 作者:太空宇宙 更新时间:2023-11-04 13:28:13 26 4
gpt4 key购买 nike

我正在尝试编写一个眨眼检测器,但它无法正常工作。

它正确地找到了脸和眼睛,然后我尝试在眼睛区域(瞳孔)找到一个圆圈,但并不总能找到。当它找到时,它会检测到眨眼,尽管眼睛没有眨眼(计数器递增)。

我尝试了不同的方法和过滤器(HoughCircles、Canny、threshold、medianBlur、smooth)但它没有改变。

这是我的代码:

#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <iomanip>
#include <sstream>
#include <string>

#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\video\tracking.hpp>
#include "opencv2/opencv.hpp"

// Include OpenCV's C++ Interface
#include "opencv2/opencv.hpp"


const float eyeTop = 0.26f; //y
const float eyeSide = 0.16f; //x
const float eyeHeight = 0.28f; //h
const float eyeWidth = 0.65f; //w
const int calibrationDefault = 100;

int blinknumber =0;
int calibrationFace = calibrationDefault;

bool leftEyeOpen = true;
bool rightEyeOpen = true;
int blinkNumberLeft = 0;
int blinkNumberRight = 0;

using namespace cv;
using namespace std;

const std::string casceye_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_mcs_eyepair_big.xml";
const std::string face_cascade_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_frontalface_alt.xml";

Mat lastFace;
Mat actualFace;

void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea);
Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace);
void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber);
void getEyes(Mat &face, Mat &eye);

namespace patch
{
template < typename T > string to_string(T n )
{
ostringstream stm ;
stm << n ;
return stm.str() ;
}
}

int main()
{
Rect faceArea;
CascadeClassifier cascFace, casceye;

if (!cascFace.load(face_cascade_name)){ printf("--(!)Error loading face cascade\n"); return -1; };
if (!casceye.load(casceye_name)){ printf("--(!)Error loading eyes cascade\n"); return -1; };


cout << "\n\tESC - Programm beenden\n\tc - zaehler auf 0 setzen\n\n";

namedWindow("Blinzel Erkennung", CV_WINDOW_AUTOSIZE);

VideoCapture capture(0);
if (!capture.isOpened())
{
cout<<"Kamera wurde nicht gefunden!"<<endl;
return 1;
}

Mat Image;

while (1)
{
Mat GrayImage;
capture >> Image;

if (Image.empty()){
continue;
}

flip(Image, Image, 1);
cvtColor(Image, GrayImage, CV_BGR2GRAY);

headTracing(GrayImage, Image, casceye , cascFace, faceArea);


switch (waitKey(2)) {
case 27:
return 0;
break;
case120:
calibrationFace = 0;
break;

case 99: // c key - zähler auf 0 setzen
leftEyeOpen = true;
rightEyeOpen = true;
blinkNumberLeft = 0;
blinkNumberRight = 0;
break;
}
}
return 0;
}




void calcFlow(const Mat& flow, Mat& cflowmap, int step, int &globalMoveX, int &globalMoveY)
{
int localMoveX = 0;
int localMoveY = 0;

for (int y = 0; y < cflowmap.rows; y += step)
{
for (int x = 0; x < cflowmap.cols; x += step)
{
const Point2f& fxy = flow.at<Point2f>(y, x);

localMoveX = localMoveX + fxy.x;
localMoveY = localMoveY + fxy.y;
}
}

globalMoveX = (localMoveX / (cflowmap.cols * cflowmap.rows))*2;
globalMoveY = (localMoveY / (cflowmap.rows * cflowmap.cols))*2;
}


void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea) {

Rect face = detectLargestObject(grayImage, cascFace);
if (face.width == 0 && face.height == 0) {
imshow("Ergebnis", image);
return;
}

calibrationFace = calibrationFace - 1;

if (faceArea.height == 0|| calibrationFace < 1) {
faceArea = face;
lastFace = grayImage(face);
calibrationFace = calibrationDefault;
}
else {

actualFace = grayImage(faceArea);

Mat flow, cflow;
calcOpticalFlowFarneback(lastFace, actualFace, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

cvtColor(lastFace, cflow, CV_GRAY2BGR);

int globalMoveX, globalMoveY;

calcFlow(flow, cflow, 1, globalMoveX, globalMoveY);


faceArea.x = faceArea.x + globalMoveX;
faceArea.y = faceArea.y + globalMoveY;

if (faceArea.x < 0) {
faceArea.x = 0;
}
if (faceArea.y < 0) {
faceArea.y = 0;
}

if (faceArea.x + faceArea.width > image.size().width - 1) {
faceArea.x = image.size().width - faceArea.width - 1;
}
if (faceArea.y + faceArea.height > image.size().height - 1) {
faceArea.y = image.size().height - faceArea.height - 1;
}
//rectangle(image,faceArea, 12);
actualFace = grayImage(faceArea);


eyeTracking(actualFace, lastFace,blinknumber); //jetzt haben wir zwei stabilisierte Frames(aktuell&vorherige) nun können wir die Bewegung berechnen
swap(lastFace, actualFace); //aktuelles Frame wird zu vorherigen Frame und umgekehrt
}
putText(image,patch::to_string(blinknumber), cvPoint(520, 45), FONT_HERSHEY_COMPLEX_SMALL, 1.5, cvScalar(100, 100, 255), 1, CV_AA);

imshow("Ergebnis", image); //Ergebniss anzeigen
}

Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace) {

Rect value;

vector<Rect> faces;
cascFace.detectMultiScale(grayImage, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE | CV_HAAR_FIND_BIGGEST_OBJECT, Size(150, 150), Size(300, 300));
if (faces.size() > 0) {
return faces[0];
}
else {
return value;
}
}

void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber) {

Mat eyeActual;
getEyes(actualFace, eyeActual);
Mat eyeActualGray;
cvtColor(eyeActual, eyeActualGray, COLOR_GRAY2BGR);

//medianBlur(eyeActual, eyeActual,5);
//cvtColor(eyeActual, eyeActualGray, COLOR_BGR2GRAY);

namedWindow("Kreis", CV_WINDOW_AUTOSIZE);

//Canny(eyeActual,eyeActual,5,70,3);
medianBlur(eyeActual, eyeActual,5);
//threshold(eyeActual,eyeActual,50,200,THRESH_BINARY);
//vector<vector<Point> > contours;

vector <Vec3f> circles;

//findContours(eyeActual.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
//drawContours(eyeActual, contours, -1, CV_RGB(255,255,255), -1);

HoughCircles( eyeActual, circles, CV_HOUGH_GRADIENT, 1, eyeActual.rows/8, 50,25,5,15 );

for( size_t i = 0; i < circles.size(); i++ )
{

Vec3i c = circles[i];
//circle(eyeActualGray,Point(c[0], c[1]), c[2], Scalar(0,0,255), 2);
circle( eyeActualGray, Point(c[0], c[1]), 2, Scalar(0,255,0), 2);

blinknumber=blinknumber+1;

}
imshow("Kreis", eyeActualGray);

}

void getEyes(Mat &face, Mat &eye) {

Size faceSize = face.size();

int eye_area_width = faceSize.width * eyeWidth;
int eye_area_height = faceSize.width *eyeHeight;
int eye_area_top = faceSize.height *eyeTop;

Rect rightEyeArea(faceSize.width*eyeSide, eye_area_top, eye_area_width, eye_area_height);
eye = face(rightEyeArea);
}

最佳答案

问题的描述听起来像是模式工程问题,而不是C++问题或OpenCV问题。如果我对你的理解正确的话,你的眼睛检测工作“相当好”。所描述的眨眼检测通过不再检测眼睛来工作。这意味着您可以通过检测每一 帧中的眼睛来检测是否眨眼。这需要一个非常好的眼睛检测器。缺少一个检测结果会导致眨眼得出结论。

一种更强大的方法会记住眼睛周围的区域,并寻找像素的大变化 - 眼睑看起来完全不同。

但是。查看您的代码,我看不到实现您描述的任何内容的代码。您调用 HoughCircles 来检测圆。然后,对于检测到的每个圆圈,您都将一个添加到眨眼计数器?!

此外,您还有一个似乎未使用的 blinkCounterLeft 和 blinkCounterRight。结合不一致的缩进和注释掉的代码,我怀疑您已经失去了对自己代码的监督。

关于c++ - 眨眼检测 OpenCV C++,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/32581128/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com