
c# - Memory leak issues: eye tracking in Unity using OpenCVSharp


I have been working on this project for a few months now, trying to integrate eye tracking into Unity using OpenCVSharp. I have managed to get everything working, including the actual tracking of the pupil etc., but I have a memory leak. Basically the program stalls after running for 20-30 seconds, and the console error reads "Unable to allocate (insert number here) bits". Watching the memory usage while the program runs, you can see it climb steadily until it maxes out and the program crashes.

I have spent a long time trying to fix this and have read a lot of help posts on releasing images, storage, etc. correctly. Even though I am doing that, they do not seem to be released properly. I have tried forcing the garbage collector to reclaim the memory, but that does not seem to work either. Am I doing something fundamentally wrong with the images and how I reclaim them? Or is creating new images every frame (even though I am releasing them) causing the problem?

Any help would be greatly appreciated. The code is below; you can ignore a lot of the stuff in the Update function, as it relates to the actual tracking section and the calibration. I realise the code is messy, sorry about that! The main part to worry about is EyeDetection().

using UnityEngine;
using System.Collections;
using System;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.Blob;
//using System.Xml;
//using System.Threading;
//using AForge;

//using OpenCvSharp.Extensions;
//using System.Windows.Media;
//using System.Windows.Media.Imaging;



public class CaptureScript2 : MonoBehaviour
{
    //public GameObject planeObj;
    public WebCamTexture webcamTexture; //Texture retrieved from the webcam
    //public Texture2D texImage; //Texture to apply to plane
    public string deviceName;

    private int devId = 1;
    private int imWidth = 800; //camera width
    private int imHeight = 600; //camera height
    private string errorMsg = "No errors found!";
    private static IplImage camImage; //Ipl image of the converted webcam texture
    //private static IplImage yuv;
    //private static IplImage dst;
    private CvCapture cap; //Current camera capture
    //private IplImage eyeLeft;
    //private IplImage eyeRight;
    //private IplImage eyeLeftFinal;
    //private IplImage eyeRightFinal;
    private double leftEyeX;
    private double leftEyeY;
    private double rightEyeX;
    private double rightEyeY;
    private int calibState;
    private double LTRCPx;
    private double LTLCPx;
    private double LBLCPy;
    private double LTLCPy;
    private double RTRCPx;
    private double RTLCPx;
    private double RBLCPy;
    private double RTLCPy;
    private double gazeWidth;
    private double gazeHeight;
    private double gazeScaleX;
    private double gazeScaleY;

    public static CvMemStorage storageFace;
    public static CvMemStorage storage;

    public static double gazePosX;
    public static double gazePosY;

    private bool printed = true;
    //private CvRect r;
    //private IplImage smallImg;

    CvColor[] colors = new CvColor[]
    {
        new CvColor(0,0,255),
        new CvColor(0,128,255),
        new CvColor(0,255,255),
        new CvColor(0,255,0),
        new CvColor(255,128,0),
        new CvColor(255,255,0),
        new CvColor(255,0,0),
        new CvColor(255,0,255),
    };

    //scale for small image
    const double Scale = 1.25;
    const double scaleEye = 10.0;
    const double ScaleFactor = 2.5;
    //must show 2 eyes on the screen
    const int MinNeighbors = 2;
    const int MinNeighborsFace = 1;


    // Use this for initialization
    void Start ()
    {
        //Webcam initialisation
        WebCamDevice[] devices = WebCamTexture.devices;
        Debug.Log ("num:" + devices.Length);

        for (int i = 0; i < devices.Length; i++)
        {
            print (devices [i].name);
            if (devices [i].name.CompareTo (deviceName) == 1)
            {
                devId = i;
            }
        }

        if (devId >= 0)
        {
            //mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
        }

        //create capture from current device
        cap = Cv.CreateCameraCapture(devId);
        //set properties of the capture
        Cv.SetCaptureProperty(cap, CaptureProperty.FrameWidth, imWidth);
        Cv.SetCaptureProperty(cap, CaptureProperty.FrameHeight, imHeight);
        //create window to display capture
        //Cv.NamedWindow("Eye tracking", WindowMode.AutoSize);
        Cv.NamedWindow ("EyeLeft", WindowMode.AutoSize);
        Cv.NamedWindow ("EyeRight", WindowMode.AutoSize);
        Cv.NamedWindow ("Face", WindowMode.AutoSize);

        calibState = 1;
    }


    void Update ()
    {
        if (Input.GetKeyDown(KeyCode.Space) && calibState < 3)
        {
            calibState++;
        }

        if (Input.GetMouseButtonDown(0) && calibState < 4)
        {
            printed = false;
            calibState++;

            Cv.DestroyAllWindows();
            Cv.ReleaseCapture(cap);

            cap = Cv.CreateCameraCapture(devId);
        }

        //if device is connected
        if (devId >= 0)
        {
            //cap = Cv.CreateCameraCapture(devId);
            //Cv.Release
            //retrieve the current frame from camera
            camImage = Cv.QueryFrame(cap);
            //detect eyes and apply circles
            EyeDetection();

            Cv.ReleaseImage(camImage);
            //PupilTracking();

            switch (calibState)
            {
                case 1:
                    LTRCPx = leftEyeX;
                    RTRCPx = rightEyeX;
                    break;

                case 2:
                    LTLCPx = leftEyeX;
                    LTLCPy = leftEyeY;
                    RTLCPx = rightEyeX;
                    RTLCPy = rightEyeY;
                    break;

                case 3:
                    LBLCPy = leftEyeY;// + rightEyeY) /2 ;
                    RBLCPy = rightEyeY;
                    break;

                case 4:
                    //gazeWidth = (((LTRCPx - LTLCPx) + (RTRCPx - RTLCPx)) / 2) * -1;
                    //gazeHeight = ((LBLCPy - LTLCPy) + (RBLCPy - RTLCPy)) /2;
                    gazeWidth = LTLCPx - LTRCPx;
                    gazeHeight = LBLCPy - LTLCPy;

                    gazeScaleX = (Screen.width / gazeWidth);
                    gazeScaleY = Screen.height / gazeHeight;

                    gazePosX = gazeScaleX * (leftEyeX - LTRCPx);
                    gazePosY = gazeScaleY * (leftEyeY - LTLCPy);
                    break;
            }

            //Cv.ReleaseCapture(cap);
        }
        else
        {
            Debug.Log ("Can't find camera!");
        }

        //print (calibState);
        if (printed == false)
        {
            print ("Gaze pos x = " + gazePosX);
            print ("Gaze pos Y = " + gazePosY);
            print ("Scale x = " + gazeScaleX);
            print ("Scale y = " + gazeScaleY);
            print ("Gaze width = " + gazeWidth);
            print ("Gaze Height = " + gazeHeight);
            print ("left eye x = " + leftEyeX);
            print ("left eye Y = " + leftEyeY);
            print ("calib state = " + calibState);

            printed = true;
        }

        //Cv.ShowImage("Eye tracking", mainImage);
        //Cv.ShowImage ("EyeLeft", grayEyeLeft);
        //Cv.ShowImage ("EyeRight", grayEyeRight);
    }



    void EyeDetection()
    {
        IplImage mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);

        IplImage smallImg = new IplImage(mainImage.Width, mainImage.Height, BitDepth.U8, 1);
        Cv.Resize (camImage, mainImage, Interpolation.Linear);

        IplImage gray = new IplImage(mainImage.Size, BitDepth.U8, 1);

        Cv.CvtColor (mainImage, gray, ColorConversion.BgrToGray);
        Cv.Resize(gray, smallImg, Interpolation.Linear);
        Cv.EqualizeHist(smallImg, smallImg);
        Cv.ReleaseImage (gray);

        //IplImage hack = Cv.LoadImage("\\Users\\User\\Desktop\\Honours Projects\\Project10\\Project\\Assets\\bug.jpeg");
        //Cv.Erode (hack, hack);
        //Cv.ReleaseImage (hack);

        //uint sizeStore = 2877212;
        CvHaarClassifierCascade cascadeFace = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");

        CvMemStorage storageFace = new CvMemStorage();
        storageFace.Clear ();

        CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, storageFace, ScaleFactor, MinNeighborsFace, 0, new CvSize(30,30));

        for (int j = 0; j < faces.Total; j++)
        {
            CvRect face = faces[j].Value.Rect;

            CvHaarClassifierCascade cascadeEye = CvHaarClassifierCascade.FromFile ("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");

            IplImage faceImg = new IplImage(face.Width, face.Height, BitDepth.U8, 1);
            IplImage faceImgColour = new IplImage(face.Width, face.Height, BitDepth.U8, 3);

            CvMemStorage storage = new CvMemStorage();
            storage.Clear ();

            Cv.SetImageROI(smallImg, face);
            Cv.Copy (smallImg, faceImg);
            Cv.ResetImageROI(smallImg);

            Cv.SetImageROI(mainImage, face);
            Cv.Copy (mainImage, faceImgColour);
            Cv.ResetImageROI(mainImage);

            Cv.ShowImage ("Face", faceImgColour);

            CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(faceImg, cascadeEye, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            for (int i = 0; i < eyes.Total; i++)
            {
                CvRect r = eyes[i].Value.Rect;

                Cv.SetImageROI(faceImgColour, r);

                if (i == 1)
                {
                    IplImage eyeLeft = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);

                    Cv.Copy(faceImgColour, eyeLeft);

                    IplImage yuv = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
                    IplImage dst = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
                    IplImage grayEyeLeft = new IplImage(eyeLeft.Size, BitDepth.U8, 1);
                    IplImage eyeLeftFinal = new IplImage(Cv.Round(grayEyeLeft.Width * scaleEye), Cv.Round(grayEyeLeft.Height * scaleEye), BitDepth.U8, 1);
                    Cv.CvtColor(eyeLeft, yuv, ColorConversion.BgrToCrCb);
                    Cv.Not(yuv, dst);
                    Cv.CvtColor(dst, eyeLeft, ColorConversion.CrCbToBgr);
                    Cv.CvtColor(eyeLeft, grayEyeLeft, ColorConversion.BgrToGray);

                    Cv.Resize (grayEyeLeft, eyeLeftFinal, Interpolation.Linear);
                    Cv.Threshold(eyeLeftFinal, eyeLeftFinal, 230, 230, ThresholdType.Binary);
                    CvBlobs b1 = new CvBlobs(eyeLeftFinal);
                    if (b1.Count > 0)
                    {
                        leftEyeX = b1.LargestBlob().Centroid.X;
                        leftEyeY = b1.LargestBlob().Centroid.Y;
                    }

                    Cv.ShowImage ("EyeLeft", eyeLeftFinal);

                    Cv.ReleaseImage (yuv);
                    Cv.ReleaseImage (dst);
                    Cv.ReleaseImage (grayEyeLeft);
                    Cv.ReleaseImage (eyeLeftFinal);
                    b1.Clear();

                    Cv.ReleaseImage (eyeLeft);
                }
                if (i == 0)
                {
                    IplImage eyeRight = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);

                    Cv.Copy(faceImgColour, eyeRight);

                    IplImage yuv2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
                    IplImage dst2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
                    IplImage grayEyeRight = new IplImage(eyeRight.Size, BitDepth.U8, 1);
                    IplImage eyeRightFinal = new IplImage(Cv.Round(grayEyeRight.Width * scaleEye), Cv.Round(grayEyeRight.Height * scaleEye), BitDepth.U8, 1);
                    Cv.CvtColor(eyeRight, yuv2, ColorConversion.BgrToCrCb);
                    Cv.Not(yuv2, dst2);
                    Cv.CvtColor(dst2, eyeRight, ColorConversion.CrCbToBgr);
                    Cv.CvtColor(eyeRight, grayEyeRight, ColorConversion.BgrToGray);

                    Cv.Resize (grayEyeRight, eyeRightFinal, Interpolation.Linear);
                    Cv.Threshold(eyeRightFinal, eyeRightFinal, 230, 230, ThresholdType.Binary);
                    CvBlobs b2 = new CvBlobs(eyeRightFinal);

                    if (b2.Count > 0)
                    {
                        rightEyeX = b2.LargestBlob().Centroid.X;
                        rightEyeY = b2.LargestBlob().Centroid.Y;
                    }

                    Cv.ShowImage ("EyeRight", eyeRightFinal);

                    Cv.ReleaseImage (yuv2);
                    Cv.ReleaseImage (dst2);
                    Cv.ReleaseImage (grayEyeRight);
                    Cv.ReleaseImage (eyeRightFinal);
                    b2.Clear ();

                    Cv.ReleaseImage (eyeRight);
                }

                Cv.ResetImageROI(faceImgColour);
            }

            //Cv.ShowImage("Eye tracking", mainImage);

            Cv.ReleaseImage (faceImg);
            Cv.ReleaseImage (faceImgColour);
            Cv.ReleaseMemStorage(storage);
            Cv.ReleaseHaarClassifierCascade(cascadeEye);
        }

        Cv.ReleaseMemStorage(storageFace);
        Cv.ReleaseHaarClassifierCascade(cascadeFace);

        //PupilTracking ();
        Cv.ReleaseImage(smallImg);
        Cv.ReleaseImage (mainImage);
        GC.Collect();
    }

    void OnGUI ()
    {
        GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
    }

    void OnDestroy()
    {
        Cv.DestroyAllWindows();
        Cv.ReleaseCapture(cap);
    }
}

Best Answer

I am not familiar with OpenCV, but as a general rule:

  • I would limit instantiation inside the update loop, e.g. new CvMemStorage()
  • Don't load data inside the update loop: CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml"); should be loaded once at startup and assigned to a class variable (see the sketch after this answer).
  • Allocate at start, release only when needed

I find that in most cases there is plenty of RAM to work with. I allocate whatever will be used over and over in Start(), especially anything touched by the Update() loop, which runs 60 times per second!

But loading XML data and allocating and releasing variables such as storage and cascadeEye is bound to create problems when the app tries to do it 60 times per second.

Creating and destroying objects is very, very, very expensive, so do it wisely and sparingly, especially when dealing with complex data structures like OpenCV objects, bitmaps or loaders.
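
To make the first two points concrete, here is a minimal sketch (not the asker's actual fix) of how the question's class could be restructured so the cascades and storages become long-lived members, using only the old-style OpenCvSharp calls that already appear in the question. The cascade paths are shortened placeholders, and the per-face/per-eye work is elided:

    // Long-lived detection resources: allocated once, reused every frame.
    private CvHaarClassifierCascade cascadeFace;
    private CvHaarClassifierCascade cascadeEye;
    private CvMemStorage storageFace;
    private CvMemStorage storage;

    void Start ()
    {
        // Load the XML cascades a single time instead of once per frame.
        cascadeFace = CvHaarClassifierCascade.FromFile("haarcascade_frontalface_alt2.xml");
        cascadeEye = CvHaarClassifierCascade.FromFile("haarcascade_eye.xml");
        storageFace = new CvMemStorage();
        storage = new CvMemStorage();
        // ...the rest of the existing Start() code...
    }

    void EyeDetection()
    {
        // Clear() rewinds the storages for reuse, instead of freeing and
        // re-allocating their native memory on every call.
        storageFace.Clear();
        storage.Clear();

        CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace,
            storageFace, ScaleFactor, MinNeighborsFace, 0, new CvSize(30, 30));
        // ...per-face and per-eye processing as in the question, but with no
        // FromFile() and no new CvMemStorage() inside the loops...
    }

    void OnDestroy()
    {
        // Release the long-lived native objects exactly once, on teardown.
        Cv.ReleaseMemStorage(storage);
        Cv.ReleaseMemStorage(storageFace);
        Cv.ReleaseHaarClassifierCascade(cascadeEye);
        Cv.ReleaseHaarClassifierCascade(cascadeFace);
        Cv.DestroyAllWindows();
        Cv.ReleaseCapture(cap);
    }

The same idea would apply to the per-frame IplImage buffers: since the camera resolution is fixed, mainImage, smallImg and gray could likewise be allocated once in Start() and released only in OnDestroy().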


Regarding c# - memory leak issues with eye tracking in Unity using OpenCVSharp, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/23455458/
