I'm currently working on a project that involves integrating OpenCVSharp into Unity to do eye tracking within a game environment. I've managed to get OpenCVSharp integrated into the Unity editor, and I currently have eye detection (not tracking) working in-game. It finds your eyes in the webcam image, then displays where it currently detects them on a texture that I show in the scene.
However, it causes a huge FPS drop, mainly because every frame it converts the webcam texture to an IplImage so that OpenCV can work with it. Then, once all the eye detection is done, it has to convert that back to a Texture2D to display in the scene. So, understandably, it's too much for the CPU. (As far as I can tell, it only uses one core on my CPU.)
Is there a way to do all the eye detection without converting the texture to an IplImage? Or any other way to fix the FPS drop? Some things I've tried include:
- Limiting the frames on which it updates. However, this just made it run smoothly, then stutter horribly on the frames where it had to update.
- Looking into threading, but as far as I'm aware Unity doesn't allow it. As far as I can tell it only uses one core on my CPU, which seems a bit silly. If there's a way to change that, could it solve the problem?
- Trying different resolutions on the camera, but the resolutions at which the game actually runs smoothly are too small to detect eyes, let alone track them.
I've included my code below; if you'd rather view it in a code editor, here is the link to the C# File. Any suggestions or help would be greatly appreciated!
For reference, I used the code from here (eye detection using OpenCvSharp).
using UnityEngine;
using System.Collections;
using System;
using System.IO;
using OpenCvSharp;
//using System.Xml;
//using OpenCvSharp.Extensions;
//using System.Windows.Media;
//using System.Windows.Media.Imaging;

public class CaptureScript : MonoBehaviour
{
    public GameObject planeObj;
    public WebCamTexture webcamTexture;   //Texture retrieved from the webcam
    public Texture2D texImage;            //Texture to apply to plane
    public string deviceName;

    private int devId = 1;
    private int imWidth = 640;            //camera width
    private int imHeight = 360;           //camera height
    private string errorMsg = "No errors found!";
    static IplImage matrix;               //IplImage of the converted webcam texture

    CvColor[] colors = new CvColor[]
    {
        new CvColor(0,0,255),
        new CvColor(0,128,255),
        new CvColor(0,255,255),
        new CvColor(0,255,0),
        new CvColor(255,128,0),
        new CvColor(255,255,0),
        new CvColor(255,0,0),
        new CvColor(255,0,255),
    };

    const double Scale = 1.25;
    const double ScaleFactor = 2.5;
    const int MinNeighbors = 2;

    // Use this for initialization
    void Start ()
    {
        //Webcam initialisation
        WebCamDevice[] devices = WebCamTexture.devices;
        Debug.Log ("num:" + devices.Length);
        for (int i = 0; i < devices.Length; i++) {
            print (devices [i].name);
            if (devices [i].name.CompareTo (deviceName) == 1) {
                devId = i;
            }
        }

        if (devId >= 0) {
            planeObj = GameObject.Find ("Plane");
            texImage = new Texture2D (imWidth, imHeight, TextureFormat.RGB24, false);
            webcamTexture = new WebCamTexture (devices [devId].name, imWidth, imHeight, 30);
            webcamTexture.Play ();
            matrix = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
        }
    }

    void Update ()
    {
        if (devId >= 0) {
            //Convert webcam texture to IplImage
            Texture2DtoIplImage ();

            /*DO IMAGE MANIPULATION HERE*/
            //do eye detection on the IplImage
            EyeDetection ();
            /*END IMAGE MANIPULATION*/

            if (webcamTexture.didUpdateThisFrame) {
                //convert IplImage back to a texture
                IplImageToTexture2D ();
            }
        } else {
            Debug.Log ("Can't find camera!");
        }
    }

    void EyeDetection ()
    {
        using (IplImage smallImg = new IplImage (new CvSize (Cv.Round (imWidth / Scale), Cv.Round (imHeight / Scale)), BitDepth.U8, 1)) {
            using (IplImage gray = new IplImage (matrix.Size, BitDepth.U8, 1)) {
                Cv.CvtColor (matrix, gray, ColorConversion.BgrToGray);
                Cv.Resize (gray, smallImg, Interpolation.Linear);
                Cv.EqualizeHist (smallImg, smallImg);
            }

            using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile (@"C:\Users\User\Documents\opencv\sources\data\haarcascades\haarcascade_eye.xml"))
            using (CvMemStorage storage = new CvMemStorage ()) {
                storage.Clear ();
                CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects (smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize (30, 30));
                for (int i = 0; i < eyes.Total; i++) {
                    CvRect r = eyes [i].Value.Rect;
                    CvPoint center = new CvPoint { X = Cv.Round ((r.X + r.Width * 0.5) * Scale), Y = Cv.Round ((r.Y + r.Height * 0.5) * Scale) };
                    int radius = Cv.Round ((r.Width + r.Height) * 0.25 * Scale);
                    matrix.Circle (center, radius, colors [i % 8], 3, LineType.AntiAlias, 0);
                }
            }
        }
    }

    void OnGUI ()
    {
        GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
    }

    void IplImageToTexture2D ()
    {
        int jBackwards = imHeight;
        for (int i = 0; i < imHeight; i++) {
            for (int j = 0; j < imWidth; j++) {
                float b = (float)matrix [i, j].Val0;
                float g = (float)matrix [i, j].Val1;
                float r = (float)matrix [i, j].Val2;
                Color color = new Color (r / 255.0f, g / 255.0f, b / 255.0f);
                jBackwards = imHeight - i - 1; // notice it is jBackwards and i
                texImage.SetPixel (j, jBackwards, color);
            }
        }
        texImage.Apply ();
        planeObj.renderer.material.mainTexture = texImage;
    }

    void Texture2DtoIplImage ()
    {
        int jBackwards = imHeight;
        for (int v = 0; v < imHeight; ++v) {
            for (int u = 0; u < imWidth; ++u) {
                CvScalar col = new CvScalar ();
                col.Val0 = (double)webcamTexture.GetPixel (u, v).b * 255;
                col.Val1 = (double)webcamTexture.GetPixel (u, v).g * 255;
                col.Val2 = (double)webcamTexture.GetPixel (u, v).r * 255;
                jBackwards = imHeight - v - 1;
                matrix.Set2D (jBackwards, u, col);
                //matrix [jBackwards, u] = col;
            }
        }
    }
}
Answer 0 (score: 1)
You can move these out of the per-frame update loop:
using(CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile (@"C:\Users\User\Documents\opencv\sources\data\haarcascades\haarcascade_eye.xml"))
using(CvMemStorage storage = new CvMemStorage())
There is no reason to build the recognizer graph every frame.
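As a minimal sketch of that change (the class name, field names, and the Detect helper are mine, not the asker's; the cascade path is the one from the question):

using OpenCvSharp;
using UnityEngine;

public class EyeDetector : MonoBehaviour
{
    // Created once in Start() instead of once per frame.
    private CvHaarClassifierCascade cascade;
    private CvMemStorage storage;

    void Start ()
    {
        cascade = CvHaarClassifierCascade.FromFile (@"C:\Users\User\Documents\opencv\sources\data\haarcascades\haarcascade_eye.xml");
        storage = new CvMemStorage ();
    }

    // Called every frame with the already-converted, downscaled grey image.
    void Detect (IplImage smallImg)
    {
        storage.Clear (); // reuse the same native storage each frame
        CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects (smallImg, cascade, storage, 2.5, 2, 0, new CvSize (30, 30));
        // ...use the detections as in the question's EyeDetection()...
    }

    void OnDestroy ()
    {
        // Release the native resources once, when the behaviour is destroyed.
        if (cascade != null) cascade.Dispose ();
        if (storage != null) storage.Dispose ();
    }
}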
If you want a real speed-up, threading is the sensible way forward. Unity itself isn't threaded, but you can fold other threads in if you're careful.
Do the texture -> IplImage conversion on the main thread, then fire an event to kick off your thread. The thread can do all the CV work, possibly construct the Texture2D, then push back to the main thread for rendering.
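A rough sketch of that hand-off, assuming a plain System.Threading worker (the CV work and texture upload are left as comments because Unity's API may only be touched from the main thread):

using System.Threading;
using UnityEngine;

public class ThreadedDetector : MonoBehaviour
{
    private Thread worker;
    private readonly AutoResetEvent frameReady = new AutoResetEvent (false);
    private volatile bool running = true;
    private volatile bool resultReady;

    void Start ()
    {
        worker = new Thread (WorkerLoop) { IsBackground = true };
        worker.Start ();
    }

    void Update ()
    {
        // Main thread: convert WebCamTexture -> IplImage here (Unity calls
        // such as GetPixels32 are only legal on the main thread), then
        // signal the worker that a fresh frame is available.
        frameReady.Set ();

        if (resultReady) {
            resultReady = false;
            // Main thread: copy the worker's output into a Texture2D and
            // assign it to the plane's material here.
        }
    }

    void WorkerLoop ()
    {
        while (running) {
            frameReady.WaitOne ();
            // Worker thread: run the OpenCV detection on the copied pixels.
            // No UnityEngine calls allowed here.
            resultReady = true;
        }
    }

    void OnDestroy ()
    {
        running = false;
        frameReady.Set (); // wake the worker so it can exit
        worker.Join ();
    }
}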
Answer 1 (score: 1)
You should also be able to get some performance improvement if you use:

Color32[] pixels;
pixels = new Color32[webcamTexture.width * webcamTexture.height];
webcamTexture.GetPixels32(pixels);

The Unity docs suggest this can be faster than calling "GetPixels" (and it is certainly faster than calling GetPixel for every single pixel), and you then don't need to manually scale each RGB channel by 255.
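For instance, here is a sketch of the question's Texture2DtoIplImage rewritten around a single GetPixels32 call (it assumes the same matrix, webcamTexture, imWidth, and imHeight fields as the question's code, and preserves its vertical flip):

// Allocate once (e.g. in Start()) and reuse every frame.
private Color32[] pixels;

void Texture2DtoIplImage ()
{
    if (pixels == null)
        pixels = new Color32[imWidth * imHeight];
    webcamTexture.GetPixels32 (pixels); // one native call instead of width*height GetPixel calls

    for (int v = 0; v < imHeight; ++v) {
        int rowStart = v * imWidth;         // pixels[v * width + u] matches GetPixel(u, v)
        int jBackwards = imHeight - v - 1;  // flip vertically, as the original loop did
        for (int u = 0; u < imWidth; ++u) {
            Color32 c = pixels [rowStart + u];
            // Color32 components are already bytes (0-255): no * 255 scaling needed.
            matrix.Set2D (jBackwards, u, new CvScalar (c.b, c.g, c.r));
        }
    }
}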