
android - Running OpenCV eye detection from an Android background service


I want to run eye detection from an Android background service using OpenCV4Android. I have code that works well, but as an Activity rather than a Service. I understand that the Android camera must have a preview in order to open, so I created a preview (a small one, so that it appears hidden, since I want the processing to happen in the background) and started the camera for recording. The camera starts successfully, but OpenCV does not detect eyes or faces; it only loads the xml classifiers. I expected OpenCV's callbacks, such as onCameraViewStarted and onCameraFrame, to be invoked once I opened the camera for recording, but they are not.

Here is the code:

public class BackgroundService extends Service implements SurfaceHolder.Callback, CameraBridgeViewBase.CvCameraViewListener2 {

private static final String TAG = "OCVSample::Activity";
private static final Scalar FACE_RECT_COLOR = new Scalar(0, 255, 0, 255);
public static final int JAVA_DETECTOR = 0;
private static final int TM_SQDIFF = 0;
private static final int TM_SQDIFF_NORMED = 1;
private static final int TM_CCOEFF = 2;
private static final int TM_CCOEFF_NORMED = 3;
private static final int TM_CCORR = 4;
private static final int TM_CCORR_NORMED = 5;


private int learn_frames = 0;
private Mat templateR;//right eye template
private Mat templateL; // left eye template
int method = 0;

private MenuItem mItemFace50;
private MenuItem mItemFace40;
private MenuItem mItemFace30;
private MenuItem mItemFace20;
private MenuItem mItemType;

private Mat mRgba;
private Mat mGray;
// matrix for zooming
private Mat mZoomWindow;
private Mat mZoomWindow2;

private File mCascadeFile;
private CascadeClassifier mJavaDetector;
private CascadeClassifier mJavaDetectorEye;


private int mDetectorType = JAVA_DETECTOR;
private String[] mDetectorName;

private float mRelativeFaceSize = 0.2f;
private int mAbsoluteFaceSize = 0;

private CameraBridgeViewBase mOpenCvCameraView;

private SeekBar mMethodSeekbar;
private TextView mValue;

double xCenter = -1;
double yCenter = -1;

MediaRecorder mediaRecorder;
// Binder given to clients
private final IBinder mBinder = new LocalBinder();

public class LocalBinder extends Binder {
BackgroundService getService() {
// Return this instance of this service so clients can call public methods
return BackgroundService.this;
}
}//end inner class that returns an instance of the service.

@Override
public IBinder onBind(Intent intent) {

return mBinder;
}//end onBind.


private WindowManager windowManager;
private SurfaceView surfaceView;
private Camera camera = null;

@Override
public void onCreate() {


// Start foreground service to avoid unexpected kill
Notification notification = new Notification.Builder(this)
.setContentTitle("Background Video Recorder")
.setContentText("")
.setSmallIcon(R.drawable.vecsat_logo)
.build();
startForeground(1234, notification);

// Create new SurfaceView, set its size to 1x1, move it to the top left corner and set this service as a callback
windowManager = (WindowManager) this.getSystemService(Context.WINDOW_SERVICE);
surfaceView = new SurfaceView(this);
WindowManager.LayoutParams layoutParams = new WindowManager.LayoutParams(
100, 100,
WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH,
PixelFormat.TRANSLUCENT
);

Log.i(TAG, "100 x 100 executed");

layoutParams.gravity = Gravity.LEFT | Gravity.TOP;
windowManager.addView(surfaceView, layoutParams);
surfaceView.getHolder().addCallback(this);


//constructor:
mDetectorName = new String[2]; // holds the detector names (two entries).
mDetectorName[JAVA_DETECTOR] = "Java"; // the entry at the JAVA_DETECTOR index is the Java detector.

Log.i(TAG, "Instantiated new " + ((Object) this).getClass().getSimpleName());

OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_11, this,
mLoaderCallback); // load the OpenCV library asynchronously; mLoaderCallback is invoked once it is ready.

}

// Method called right after Surface created (initializing and starting MediaRecorder)
@Override
public void surfaceCreated(SurfaceHolder surfaceHolder) {

Log.i(TAG, "surfaceCreated method");

camera = Camera.open(1);
camera.unlock();


mediaRecorder = new MediaRecorder();



mediaRecorder.setPreviewDisplay(surfaceHolder.getSurface());
mediaRecorder.setCamera(camera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
mediaRecorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));

mediaRecorder.setOutputFile(
Environment.getExternalStorageDirectory()+"/"+
DateFormat.format("yyyy-MM-dd_kk-mm-ss", new Date().getTime())+
".mp4"
);

try { mediaRecorder.prepare(); } catch (Exception e) { Log.e(TAG, "MediaRecorder prepare failed", e); }
mediaRecorder.start();


}


// Stop recording and remove SurfaceView
@Override
public void onDestroy() {

Log.i(TAG, "surfaceDestroyed method");

camera.lock();
camera.release();

windowManager.removeView(surfaceView);

}

@Override
public void surfaceChanged(SurfaceHolder surfaceHolder, int format, int width, int height) {}

@Override
public void surfaceDestroyed(SurfaceHolder surfaceHolder) {


}

private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
//int status: the status of initialization, success or not.
//now make a switch for the status cases: under success case do the work, load the classifiers..

switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i(TAG, "OpenCV loaded successfully"); // was loaded and initialized successfully..

try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(
R.raw.lbpcascade_frontalface); // get the face classifier from the resource.
File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir,
"lbpcascade_frontalface.xml"); // create a directory inside your app, and a file inside it to store the
FileOutputStream os = new FileOutputStream(mCascadeFile); // prepare an output stream that will write the classifier's code on the file in the app.

//read and write
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();

// --------------------------------- load left eye
// classifier -----------------------------------
InputStream iser = getResources().openRawResource(
R.raw.haarcascade_lefteye_2splits);
File cascadeDirER = getDir("cascadeER",
Context.MODE_PRIVATE);
File cascadeFileER = new File(cascadeDirER,
"haarcascade_eye_right.xml");
FileOutputStream oser = new FileOutputStream(cascadeFileER);

byte[] bufferER = new byte[4096];
int bytesReadER;
while ((bytesReadER = iser.read(bufferER)) != -1) {
oser.write(bufferER, 0, bytesReadER);
}
iser.close();
oser.close();

//check whether the classifier can be loaded.
mJavaDetector = new CascadeClassifier(
mCascadeFile.getAbsolutePath());
if (mJavaDetector.empty()) {
Toast.makeText(getApplicationContext(), "face classifier error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade face classifier");
mJavaDetector = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());

mJavaDetectorEye = new CascadeClassifier(
cascadeFileER.getAbsolutePath());
if (mJavaDetectorEye.empty()) {
Toast.makeText(getApplicationContext(), "eye classifer error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade eye classifier");
mJavaDetectorEye = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());



cascadeDir.delete();

} catch (IOException e) {
e.printStackTrace();
Log.e(TAG, "Failed to load cascade. Exception thrown: " + e);
}

//Whether or not the classifiers loaded, this is where the front camera would be opened.
// mOpenCvCameraView.setCameraIndex(1);
//mOpenCvCameraView.enableFpsMeter(); // shows an fps label on the preview.
// mOpenCvCameraView.enableView(); // connects to the camera and starts delivering frames.

}
break;
default: {
//Reached when loading the library has failed.
super.onManagerConnected(status);
}
break;
}
}
}; // end the class.



public void onCameraViewStarted(int width, int height) {
Log.i(TAG, "onCameraViewStarted method");


//onCameraViewStarted callback will be delivered only after enableView is called and surface is available
//This method is a member of CvCameraViewListener2, and we must implement it.
mGray = new Mat(); //initialize new gray scale matrix to contain the img pixels.
mRgba = new Mat(); //initialize new rgb matrix to contain the img pixels.
}

public void onCameraViewStopped() {
Log.i(TAG, "onCameraViewStopped method");


//Release the allocated memory
//release the matrix, this releases the allocated space in memory, since mat contains a header that contains img info and a pointer that points to the matrix in the memory.
mGray.release();
mRgba.release();
mZoomWindow.release();
mZoomWindow2.release();
}

public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Log.i(TAG, "onCameraFrame method");


//This method is a member of CvCameraViewListener2, and we must implement it.
// In this method we get every frame from the camera and process it in order to track the objects.
//inputFrame is the received frame from the camera.
mRgba = inputFrame.rgba(); //convert the frame to rgba scale, then assign this value to the rgba Mat img matrix.
mGray = inputFrame.gray(); //convert the frame to gray scale, then assign this value to the gray Mat img matrix.

//Consider flipping the camera image horizontally (the front camera is mirrored).


if (mAbsoluteFaceSize == 0) {
int height = mGray.rows(); //height of the captured frame (number of rows in the gray Mat).
if (Math.round(height * mRelativeFaceSize) > 0) { //multiply that height with 0.2... Is the result > 0?
//if yes this indicates that there is a frame that was captured (it's height is not zero), so set the face size to
// Math.round(height * mRelativeFaceSize)
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
}

if (mZoomWindow == null || mZoomWindow2 == null)
CreateAuxiliaryMats();

MatOfRect faces = new MatOfRect(); //a matrix that will contain rectangles around the face (including the faces inside the rectangles), it will be filled by detectMultiScale method.

//if mJavaDetector is not null, this contains the face classifier that we have loaded previously
if (mJavaDetector != null)
//if not null, use this classifier to detect faces.
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2,
2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize),
new Size());
//in the function detectMultiScale above:
// faces is the array that will contain the rectangles around the detected faces.
// 3rd param: specifies how much the image size is reduced at each image scale.
// 4th param: how many neighbors each candidate rectangle should have to retain it.
// 5th param: flags (see the TODO above: objdetect.CV_HAAR_SCALE_IMAGE).
// 6th param: minimum possible object size; objects smaller than this are ignored (a very small minimum makes the app run heavily).
// 7th param: maximum possible object size; objects larger than this are ignored. Set both carefully so the app does not run slowly.

Rect[] facesArray = faces.toArray(); //array of faces


for (int i = 0; i < facesArray.length; i++) {
/* Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
FACE_RECT_COLOR, 3);*/

//Now draw rectangles around the obtained faces, and a circle at each rectangle center.

//mRgba in the line below means the rectangle is drawn on the colored img.
//facesArray[i].tl() returns a Point (a 2D point with x and y coordinates): the top left corner of the face rectangle.
// facesArray[i].x and facesArray[i].y are the coords of that top left corner.
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);

//calculate the center in x and y coords.
xCenter = (facesArray[i].x + facesArray[i].width + facesArray[i].x) / 2;
yCenter = (facesArray[i].y + facesArray[i].y + facesArray[i].height) / 2;
Point center = new Point(xCenter, yCenter); //store the center.
//Imgproc.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3);

Core.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3); //draw a red circle at the center of the face rectangle.

/*Imgproc.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20),
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));*/

//write the coordinates of the rectangle center:
Core.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20) , // this is the bottom left corner of the text string
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));

Rect r = facesArray[i]; //get the current face; we want to use it to detect the eyes inside it.



// compute the eye area
//Rect (x, y, w, h)
Rect eyearea = new Rect(r.x + r.width / 8,
(int) (r.y + (r.height / 4.5)), r.width - 2 * r.width / 8,
(int) (r.height / 3.0));
// split it
Rect eyearea_right = new Rect(r.x + r.width / 16,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
Rect eyearea_left = new Rect(r.x + r.width / 16
+ (r.width - 2 * r.width / 16) / 2,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
// draw the area - mGray is working grayscale mat, if you want to
// see area in rgb preview, change mGray to mRgba
/*Imgproc.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Imgproc.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Core.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);

if (learn_frames < 5) {
// not enough learned frames yet -> learn the templates from the first 5 frames.
templateR = get_template(mJavaDetectorEye, eyearea_right, 24);
templateL = get_template(mJavaDetectorEye, eyearea_left, 24);
learn_frames++;
} else {
// Learning finished, use the new templates for template
// matching
match_eye(eyearea_right, templateR, method);
match_eye(eyearea_left, templateL, method);

}


// cut eye areas and put them to zoom windows
Imgproc.resize(mRgba.submat(eyearea_left), mZoomWindow2,
mZoomWindow2.size());
Imgproc.resize(mRgba.submat(eyearea_right), mZoomWindow,
mZoomWindow.size());


}

return mRgba;
}



private void setMinFaceSize(float faceSize) {
mRelativeFaceSize = faceSize;
mAbsoluteFaceSize = 0;
}


private void CreateAuxiliaryMats() {
if (mGray.empty())
return;

int rows = mGray.rows();
int cols = mGray.cols();

if (mZoomWindow == null) {
mZoomWindow = mRgba.submat(rows / 2 + rows / 10, rows, cols / 2
+ cols / 10, cols);
mZoomWindow2 = mRgba.submat(0, rows / 2 - rows / 10, cols / 2
+ cols / 10, cols);
}

}

private void match_eye(Rect area, Mat mTemplate, int type) {
Point matchLoc;
Mat mROI = mGray.submat(area);
// Check for bad template size before computing the result dimensions
if (mTemplate.cols() == 0 || mTemplate.rows() == 0) {
return;
}
int result_cols = mROI.cols() - mTemplate.cols() + 1;
int result_rows = mROI.rows() - mTemplate.rows() + 1;
Mat mResult = new Mat(result_rows, result_cols, CvType.CV_32FC1); // Mat takes (rows, cols, type); matchTemplate writes a CV_32FC1 result.

switch (type) {
case TM_SQDIFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_SQDIFF);
break;
case TM_SQDIFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_SQDIFF_NORMED);
break;
case TM_CCOEFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCOEFF);
break;
case TM_CCOEFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCOEFF_NORMED);
break;
case TM_CCORR:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCORR);
break;
case TM_CCORR_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCORR_NORMED);
break;
}

Core.MinMaxLocResult mmres = Core.minMaxLoc(mResult);
// matching methods differ: for SQDIFF the best match is the minimum value, otherwise the maximum.
if (type == TM_SQDIFF || type == TM_SQDIFF_NORMED) {
matchLoc = mmres.minLoc;
} else {
matchLoc = mmres.maxLoc;
}

Point matchLoc_tx = new Point(matchLoc.x + area.x, matchLoc.y + area.y);
Point matchLoc_ty = new Point(matchLoc.x + mTemplate.cols() + area.x,
matchLoc.y + mTemplate.rows() + area.y);
/*Imgproc.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));*/
Core.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));
Rect rec = new Rect(matchLoc_tx,matchLoc_ty);


}

private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
Mat template = new Mat(); //prepare a Mat which will serve as a template for eyes.
Mat mROI = mGray.submat(area); //detect only region of interest which is represented by the area. So, from the total Mat get only the submat that represent roi.


MatOfRect eyes = new MatOfRect(); //will be around eyes (including eyes), this will be filled by detectMultiScale
Point iris = new Point(); //to identify iris.

Rect eye_template = new Rect();


clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
Objdetect.CASCADE_FIND_BIGGEST_OBJECT
| Objdetect.CASCADE_SCALE_IMAGE, new Size(30, 30),
new Size());

Rect[] eyesArray = eyes.toArray(); //get the detected eyes
for (int i = 0; i < eyesArray.length;) { // returns inside the loop, so only the first (biggest) detection is used.
Rect e = eyesArray[i];
e.x = area.x + e.x; //convert the eye rect from ROI-relative to frame coordinates by adding the area's origin.
e.y = area.y + e.y;
Rect eye_only_rectangle = new Rect((int) e.tl().x,
(int) (e.tl().y + e.height * 0.4), (int) e.width,
(int) (e.height * 0.6));
mROI = mGray.submat(eye_only_rectangle);
Mat vyrez = mRgba.submat(eye_only_rectangle);


Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);
// Imgproc.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
iris.x = mmG.minLoc.x + eye_only_rectangle.x;
iris.y = mmG.minLoc.y + eye_only_rectangle.y;
eye_template = new Rect((int) iris.x - size / 2, (int) iris.y
- size / 2, size, size);
/*Imgproc.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);
template = (mGray.submat(eye_template)).clone();
return template;
}
return template;
}

public void onRecreateClick(View v)
{
learn_frames = 0;
}
}

Note that the camera opens successfully for recording and the xml files are loaded, but nothing happens after that. I set the window size to 100 x 100 purely for testing; I know it should be 1 x 1.

Can anyone tell me how to solve this problem? How can I run the OpenCV camera from a background service for face and eye tracking?

Best Answer

I tried to get the OpenCV camera working in a service just like you, and I could not get the onCameraFrame and onCameraViewStarted callbacks, which means the camera was never initialized. After a series of attempts:

  • Setting the preview to INVISIBLE/GONE -> doesn't work
  • Setting the preview size to 1×1 pixels, or to the camera's 4:3 aspect ratio -> doesn't work
  • Placing the preview off screen -> doesn't work

I found that the OpenCV camera needs a preview at its real view size; only then did I get the onCameraFrame callback. Fortunately, I could place another element on top of the camera preview to hide it and show only an alert.
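
Based on that finding, here is a minimal sketch of the idea (untested, and an assumption of the general approach rather than the linked example's actual code): host a JavaCameraView at a real preview size in a WindowManager overlay, register the service as its CvCameraViewListener2, call enableView(), and stack an opaque view on top so nothing is visible. It assumes the OpenCV library has already been loaded (e.g. via initAsync, as in the question) and that the manifest declares android.permission.SYSTEM_ALERT_WINDOW:

import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.graphics.Color;
import android.graphics.PixelFormat;
import android.os.IBinder;
import android.view.View;
import android.view.WindowManager;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.JavaCameraView;
import org.opencv.core.Mat;

public class CameraInService extends Service implements CameraBridgeViewBase.CvCameraViewListener2 {

    private WindowManager windowManager;
    private JavaCameraView cameraView;
    private View cover; // opaque view stacked over the preview to hide it

    private WindowManager.LayoutParams overlayParams() {
        // A real preview size that matches the camera's 4:3 aspect ratio;
        // a 1x1 or hidden view keeps the frame callbacks from firing.
        return new WindowManager.LayoutParams(
                640, 480,
                WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
                WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE
                        | WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE,
                PixelFormat.TRANSLUCENT);
    }

    @Override
    public void onCreate() {
        super.onCreate();
        windowManager = (WindowManager) getSystemService(Context.WINDOW_SERVICE);

        cameraView = new JavaCameraView(this, CameraBridgeViewBase.CAMERA_ID_FRONT);
        cameraView.setCvCameraViewListener(this);
        windowManager.addView(cameraView, overlayParams());

        // Cover the preview with an opaque view so the user sees nothing.
        cover = new View(this);
        cover.setBackgroundColor(Color.BLACK);
        windowManager.addView(cover, overlayParams());

        // The OpenCV native library must already be loaded at this point.
        cameraView.enableView();
    }

    @Override
    public void onDestroy() {
        cameraView.disableView();
        windowManager.removeView(cover);
        windowManager.removeView(cameraView);
        super.onDestroy();
    }

    @Override
    public void onCameraViewStarted(int width, int height) { }

    @Override
    public void onCameraViewStopped() { }

    @Override
    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        // Frames arrive here once enableView() succeeds; the face/eye
        // detection from the question can run on inputFrame.gray()/rgba().
        return inputFrame.rgba();
    }

    @Override
    public IBinder onBind(Intent intent) {
        return null;
    }
}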

You can find a simple CameraInService example here; I hope it is useful to you.
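
As a usage sketch (hypothetical launcher code calling the CameraInService sketch above from an Activity), the service is started like any other; on Android 6.0+ the user must also have granted the overlay permission first:

// Hypothetical snippet inside an Activity.
// Needs android.os.Build, android.provider.Settings, android.content.Intent.
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M || Settings.canDrawOverlays(this)) {
    startService(new Intent(this, CameraInService.class));
}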

Regarding android - Running OpenCV eye detection from an Android service, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/35124134/
