- c - 在位数组中找到第一个零
- linux - Unix 显示有关匹配两种模式之一的文件的信息
- 正则表达式替换多个文件
- linux - 隐藏来自 xtrace 的命令
我已经在这个项目上工作了几个月,我正在尝试使用 OpenCVSharp 将眼动追踪集成到 Unity 中。我已经设法让一切正常工作,包括瞳孔的实际跟踪等,但是我遇到了内存泄漏。基本上在程序运行 20-30 秒后它会卡住,并且控制台错误显示“无法分配(在此处插入数字)位”。查看程序运行过程中的内存使用情况后,您可以看到它的使用量稳步攀升,直到达到最大值然后崩溃。
现在我花了很长时间尝试解决这个问题,并阅读了很多关于正确发布图像/存储等的帮助帖子。尽管我正在这样做,但似乎没有正确释放它们。我尝试使用垃圾收集器强制它回收内存,但这似乎也不起作用。我只是对图像做了一些根本性的错误以及我如何回收它们吗?或者是每帧创建新图像(即使我正在发布它们)导致问题。
如有任何帮助,我们将不胜感激。这是下面的代码,您可以忽略更新函数中的很多内容,因为它与实际的跟踪部分和校准有关。我意识到代码很乱,对此感到抱歉!要担心的主要部分是 EyeDetection()。
using UnityEngine;
using System.Collections;
using System;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.Blob;
//using System.Xml;
//using System.Threading;
//using AForge;
//using OpenCvSharp.Extensions;
//using System.Windows.Media;
//using System.Windows.Media.Imaging;
public class CaptureScript2 : MonoBehaviour
{
//public GameObject planeObj;
public WebCamTexture webcamTexture; //Texture retrieved from the webcam
//public Texture2D texImage; //Texture to apply to plane
public string deviceName;
private int devId = 1;
private int imWidth = 800; //camera width
private int imHeight = 600; //camera height
private string errorMsg = "No errors found!";
private static IplImage camImage; //Ipl image of the converted webcam texture
//private static IplImage yuv;
//private static IplImage dst;
private CvCapture cap; //Current camera capture
//private IplImage eyeLeft;
//private IplImage eyeRight;
//private IplImage eyeLeftFinal;
//private IplImage eyeRightFinal;
private double leftEyeX;
private double leftEyeY;
private double rightEyeX;
private double rightEyeY;
private int calibState;
private double LTRCPx;
private double LTLCPx;
private double LBLCPy;
private double LTLCPy;
private double RTRCPx;
private double RTLCPx;
private double RBLCPy;
private double RTLCPy;
private double gazeWidth;
private double gazeHeight;
private double gazeScaleX;
private double gazeScaleY;
public static CvMemStorage storageFace;
public static CvMemStorage storage;
public static double gazePosX;
public static double gazePosY;
private bool printed = true;
//private CvRect r;
//private IplImage smallImg;
CvColor[] colors = new CvColor[]
{
new CvColor(0,0,255),
new CvColor(0,128,255),
new CvColor(0,255,255),
new CvColor(0,255,0),
new CvColor(255,128,0),
new CvColor(255,255,0),
new CvColor(255,0,0),
new CvColor(255,0,255),
};
//scale for small image
const double Scale = 1.25;
const double scaleEye = 10.0;
const double ScaleFactor = 2.5;
//must show 2 eyes on the screen
const int MinNeighbors = 2;
const int MinNeighborsFace = 1;
// Use this for initialization
void Start ()
{
//Webcam initialisation
WebCamDevice[] devices = WebCamTexture.devices;
Debug.Log ("num:" + devices.Length);
for (int i=0; i<devices.Length; i++)
{
print (devices [i].name);
if (devices [i].name.CompareTo (deviceName) == 1)
{
devId = i;
}
}
if (devId >= 0)
{
//mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
}
//create capture from current device
cap = Cv.CreateCameraCapture(devId);
//set properties of the capture
Cv.SetCaptureProperty(cap, CaptureProperty.FrameWidth, imWidth);
Cv.SetCaptureProperty(cap, CaptureProperty.FrameHeight, imHeight);
//create window to display capture
//Cv.NamedWindow("Eye tracking", WindowMode.AutoSize);
Cv.NamedWindow ("EyeLeft", WindowMode.AutoSize);
Cv.NamedWindow ("EyeRight", WindowMode.AutoSize);
Cv.NamedWindow ("Face", WindowMode.AutoSize);
calibState = 1;
}
void Update ()
{
if(Input.GetKeyDown(KeyCode.Space) && calibState < 3)
{
calibState++;
}
if(Input.GetMouseButtonDown(0) && calibState < 4)
{
printed = false;
calibState++;
Cv.DestroyAllWindows();
Cv.ReleaseCapture(cap);
cap = Cv.CreateCameraCapture(devId);
}
//if device is connected
if (devId >= 0)
{
//cap = Cv.CreateCameraCapture(devId);
//Cv.Release
//retrieve the current frame from camera
camImage = Cv.QueryFrame(cap);
//detect eyes and apply circles
//
EyeDetection();
Cv.ReleaseImage(camImage);
//PupilTracking();
switch(calibState)
{
case 1:
LTRCPx = leftEyeX;
RTRCPx = rightEyeX;
break;
case 2:
LTLCPx = leftEyeX;
LTLCPy = leftEyeY;
RTLCPx = rightEyeX;
RTLCPy = rightEyeY;
break;
case 3:
LBLCPy = leftEyeY;// + rightEyeY) /2 ;
RBLCPy = rightEyeY;
break;
case 4:
//gazeWidth = (((LTRCPx - LTLCPx) + (RTRCPx - RTLCPx)) / 2) * -1;
//gazeHeight = ((LBLCPy - LTLCPy) + (RBLCPy - RTLCPy)) /2;
gazeWidth = LTLCPx -LTRCPx;
gazeHeight = LBLCPy - LTLCPy;
gazeScaleX = (Screen.width/gazeWidth);
gazeScaleY = Screen.height/gazeHeight;
gazePosX = gazeScaleX *(leftEyeX - LTRCPx);
gazePosY = gazeScaleY *(leftEyeY - LTLCPy);
break;
}
//Cv.ReleaseCapture(cap);
}
else
{
Debug.Log ("Can't find camera!");
}
//print (calibState);
if(printed == false)
{
print ("Gaze pos x = " + gazePosX);
print ("Gaze pos Y = " + gazePosY);
print ("Scale x = " + gazeScaleX);
print ("Scale y = " + gazeScaleY);
print ("Gaze width = " + gazeWidth);
print ("Gaze Height = " + gazeHeight);
print ("left eye x = " + leftEyeX);
print ("left eye Y = " + leftEyeY);
print ("calib state = " + calibState);
printed = true;
}
//Cv.ShowImage("Eye tracking", mainImage);
//Cv.ShowImage ("EyeLeft", grayEyeLeft);
//Cv.ShowImage ("EyeRight", grayEyeRight);
}
void EyeDetection()
{
IplImage mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
IplImage smallImg = new IplImage(mainImage.Width, mainImage.Height ,BitDepth.U8, 1);
Cv.Resize (camImage, mainImage, Interpolation.Linear);
IplImage gray = new IplImage(mainImage.Size, BitDepth.U8, 1);
Cv.CvtColor (mainImage, gray, ColorConversion.BgrToGray);
Cv.Resize(gray, smallImg, Interpolation.Linear);
Cv.EqualizeHist(smallImg, smallImg);
Cv.ReleaseImage (gray);
//IplImage hack = Cv.LoadImage("\\Users\\User\\Desktop\\Honours Projects\\Project10\\Project\\Assets\\bug.jpeg");
//Cv.Erode (hack, hack);
//Cv.ReleaseImage (hack);
//uint sizeStore = 2877212;
CvHaarClassifierCascade cascadeFace = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");
CvMemStorage storageFace = new CvMemStorage();
storageFace.Clear ();
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, storageFace, ScaleFactor, MinNeighborsFace, 0, new CvSize(30,30));
for(int j = 0; j < faces.Total; j++)
{
CvRect face = faces[j].Value.Rect;
CvHaarClassifierCascade cascadeEye = CvHaarClassifierCascade.FromFile ("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
IplImage faceImg = new IplImage(face.Width, face.Height, BitDepth.U8, 1);
IplImage faceImgColour = new IplImage(face.Width, face.Height, BitDepth.U8, 3);
CvMemStorage storage = new CvMemStorage();
storage.Clear ();
Cv.SetImageROI(smallImg, face);
Cv.Copy (smallImg, faceImg);
Cv.ResetImageROI(smallImg);
Cv.SetImageROI(mainImage, face);
Cv.Copy (mainImage, faceImgColour);
Cv.ResetImageROI(mainImage);
Cv.ShowImage ("Face", faceImgColour);
CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(faceImg, cascadeEye, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
for(int i = 0; i < eyes.Total; i++)
{
CvRect r = eyes[i].Value.Rect;
Cv.SetImageROI(faceImgColour, r);
if(i == 1)
{
IplImage eyeLeft = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
Cv.Copy(faceImgColour, eyeLeft);
IplImage yuv = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
IplImage dst = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
IplImage grayEyeLeft = new IplImage(eyeLeft.Size, BitDepth.U8, 1);
IplImage eyeLeftFinal = new IplImage(Cv.Round(grayEyeLeft.Width * scaleEye), Cv.Round(grayEyeLeft.Height * scaleEye), BitDepth.U8, 1);
Cv.CvtColor(eyeLeft, yuv, ColorConversion.BgrToCrCb);
Cv.Not(yuv, dst);
Cv.CvtColor(dst,eyeLeft,ColorConversion.CrCbToBgr);
Cv.CvtColor(eyeLeft, grayEyeLeft, ColorConversion.BgrToGray);
Cv.Resize (grayEyeLeft, eyeLeftFinal, Interpolation.Linear);
Cv.Threshold(eyeLeftFinal, eyeLeftFinal, 230, 230, ThresholdType.Binary);
CvBlobs b1 = new CvBlobs(eyeLeftFinal);
if(b1.Count > 0)
{
leftEyeX = b1.LargestBlob().Centroid.X;
leftEyeY = b1.LargestBlob().Centroid.Y;
}
Cv.ShowImage ("EyeLeft", eyeLeftFinal);
Cv.ReleaseImage (yuv);
Cv.ReleaseImage (dst);
Cv.ReleaseImage (grayEyeLeft);
Cv.ReleaseImage (eyeLeftFinal);
b1.Clear();
Cv.ReleaseImage (eyeLeft);
}
if(i == 0)
{
IplImage eyeRight = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
Cv.Copy(faceImgColour, eyeRight);
IplImage yuv2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
IplImage dst2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
IplImage grayEyeRight = new IplImage(eyeRight.Size, BitDepth.U8, 1);
IplImage eyeRightFinal = new IplImage(Cv.Round(grayEyeRight.Width * scaleEye), Cv.Round(grayEyeRight.Height * scaleEye), BitDepth.U8, 1);
Cv.CvtColor(eyeRight, yuv2, ColorConversion.BgrToCrCb);
Cv.Not(yuv2, dst2);
Cv.CvtColor(dst2,eyeRight,ColorConversion.CrCbToBgr);
Cv.CvtColor(eyeRight, grayEyeRight, ColorConversion.BgrToGray);
Cv.Resize (grayEyeRight, eyeRightFinal, Interpolation.Linear);
Cv.Threshold(eyeRightFinal, eyeRightFinal, 230, 230, ThresholdType.Binary);
CvBlobs b2 = new CvBlobs(eyeRightFinal);
if(b2.Count > 0)
{
rightEyeX = b2.LargestBlob().Centroid.X;
rightEyeY = b2.LargestBlob().Centroid.Y;
}
Cv.ShowImage ("EyeRight", eyeRightFinal);
Cv.ReleaseImage (yuv2);
Cv.ReleaseImage (dst2);
Cv.ReleaseImage (grayEyeRight);
Cv.ReleaseImage (eyeRightFinal);
b2.Clear ();
Cv.ReleaseImage (eyeRight);
}
Cv.ResetImageROI(faceImgColour);
}
//Cv.ShowImage("Eye tracking", mainImage);
Cv.ReleaseImage (faceImg);
Cv.ReleaseImage (faceImgColour);
Cv.ReleaseMemStorage(storage);
Cv.ReleaseHaarClassifierCascade(cascadeEye);
}
Cv.ReleaseMemStorage(storageFace);
Cv.ReleaseHaarClassifierCascade(cascadeFace);
//PupilTracking ();
Cv.ReleaseImage(smallImg);
Cv.ReleaseImage (mainImage);
GC.Collect();
}
void OnGUI ()
{
GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
}
void OnDestroy()
{
Cv.DestroyAllWindows();
Cv.ReleaseCapture(cap);
}
最佳答案
我不熟悉 OpenCV,但一般来说:
new CvMemStorage()
CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");
应该在启动时加载一次并分配给类变量。我发现在大多数情况下,有足够的 RAM 可供使用。我在 Start()
上分配要反复使用的内容,尤其是在 Update()
循环中每秒 60 次!
但加载 XML 数据、分配和释放变量(如 storage
或 cascadeEye
)必然会在应用每秒尝试这样做 60 次时产生问题。
创建和销毁对象是非常、非常、非常昂贵的。因此请明智而谨慎地这样做,尤其是在处理 OpenCV 对象、位图或加载器等复杂数据结构时。
嗯。
关于c# - 内存泄漏问题。使用 OpenCVSharp 在 Unity 中进行眼动追踪,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/23455458/
我有一个 RelativeLayout,我从 drawable 设置了背景。我能够将 RelativeLayout 的背景更改为另一个RadioButton 被选中。但是当它发生变化时我该如何给它一个
我正在尝试在 Google 的 Play 报亭应用中复制此动画: http://i.imgur.com/UuX1PRO.webm 我的布局看起来像这样: ... more
我一直在评估 Airflow 。我有一个用例,我有一个每小时运行一次的工作流,以获得每小时的数据聚合。另一个每天运行以获得相同的每日聚合。是否可以创建一个组合工作流,其中仅当所有小时聚合在过去一天都成
我有下一个结构: Activity 1: Activity 2: Form to add new item to the recycler View. RecyclerView
我只是想知道 JavaFx 中是否有任何简单的动 Canvas 局方法,例如 VBox 和 HBox。我希望我的应用程序在指定时间后更改 VBox 的背景颜色。但我意识到没有任何类似于 FillTra
我正在使用 Angular 4 动画在按钮上测试一个简单的淡入/淡出动画。我遇到的问题是,因为我使用的是 bool 值,所以没有任何东西被触发。从开发工具来看,它看起来像一个 .ng-animatin
有没有人在 SublimeREPL 中使用 irb 交换 pry 有任何运气?我很接近,我想。我没有收到错误,但是当我输入命令时也没有收到响应。每次我点击返回时,它的行为就像缓冲区被重置一样。 我正在
今天要向小伙伴们介绍的是一个能够快速地把数据制作成可视化、交互页面的 Python 框架:Streamlit,分分钟让你的数据动起来! 犹记得我在做机器学习和数据分析方面的毕设时,
简而言之,我想缩放 View - 就像 Android Market 一样,当您单击“更多”按钮时,例如在“描述”上。 我发现,Android Market 具有以下结构的布局: > 64d
我似乎无法让它工作。 我正在尝试每天发送一个给定的文件,其名称类似于“file_{{ds_nodash}}.csv”。 问题是我似乎无法将此名称添加为文件名,因为它似乎无法使用。在电子邮件的正文或主题
当您调整窗口大小时, float 的 div 将按预期换行到下一行。但我真的很希望这种布局变化是动画化的。 编辑:顺便说一句,找到一个不依赖于 JQuery 的解决方案会很好。如果需要,我不介意编写自
我有一个复杂的数据处理管道,目前在单台机器上用 Python 实现。 管道是围绕处理属于一系列实现文档、页面、单词等的自定义类的对象而构建的。该管道中的大多数操作都是令人尴尬地并行的——它们处理单个文
我是一名优秀的程序员,十分优秀!