First off, let me say that I have indeed followed many tutorials, such as the one on the EmguCv main site, yet a TypeInitializationException is still thrown.
Now, bear with me, because this is where it gets strange. My problem has three "levels", yet the code is exactly the same in all of them, with no changes whatsoever. That would naturally point to a reference or linking problem, but once again I have tried different tutorials, to no avail.
Level 1 (this level throws the TypeInitializationException)
I create a new project, reference everything correctly, and so on, and write my code in this new project. When I debug, the exception is thrown and the program exits. Here is a link to a picture of the problem: http://prntscr.com/uychc
Level 2 (this level works perfectly and throws no exception)
In this level, I essentially take one of the EmguCv example projects (VideoSurveilance in this case), delete the default code, and copy and paste all of my code in there. After adding the extra references I need, the program runs fine. I can't post more than three links, but you will have to trust me that the video pictures are displayed correctly.
Level 3 (this level throws no exception, but warns me of a "first chance" one)
In this level, I copy and paste the entire Level 2 project into another directory. After finding and relinking the missing files/references, I can run the program, but no picture shows up and I get the warning "A first chance exception of type 'System.TypeInitializationException' occurred in Emgu.CV.dll".
I am currently running Windows 7 x64 (and yes, I changed the build option to x64 and use the x64 DLLs), with EmguCv 2.4.9 and 2.4.2 (tested on both) and Visual Studio 2010 and 2012 (tested on both).
Here is the code, for whatever it may be worth:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
//using System.Threading.Tasks;
using System.Windows.Forms;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using System.IO;
namespace VideoSurveilance
{
    public partial class VideoSurveilance : Form
    {
        KinectSensor sensor;
        WriteableBitmap depthBitmap;
        WriteableBitmap colorBitmap;
        DepthImagePixel[] depthPixels;
        byte[] colorPixels;

        int blobCount = 0;

        public VideoSurveilance()
        {
            InitializeComponent();
        }

        private void VideoSurveilance_Load(object sender, System.EventArgs e)
        {
            // Pick the first connected Kinect sensor
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null != this.sensor)
            {
                // Enable the depth and color streams and allocate the pixel buffers
                this.sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
                this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

                this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];
                this.depthPixels = new DepthImagePixel[this.sensor.DepthStream.FramePixelDataLength];

                this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
                this.depthBitmap = new WriteableBitmap(this.sensor.DepthStream.FrameWidth, this.sensor.DepthStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Build an empty WriteableBitmap of the depth stream's size and copy it into a System.Drawing.Bitmap
                WriteableBitmap bitmap;
                bitmap = new WriteableBitmap(this.sensor.DepthStream.FrameWidth, this.sensor.DepthStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                byte[] retVal = new byte[bitmap.PixelWidth * bitmap.PixelHeight * 4];
                bitmap.CopyPixels(new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight), retVal, bitmap.PixelWidth * 4, 0);

                Bitmap b = new Bitmap(bitmap.PixelWidth, bitmap.PixelHeight);
                int k = 0;
                byte red, green, blue, alpha;
                for (int i = 0; i < bitmap.PixelWidth; i++)
                {
                    for (int j = 0; j < bitmap.PixelHeight && k < retVal.Length; j++)
                    {
                        alpha = retVal[k++];
                        blue = retVal[k++];
                        green = retVal[k++];
                        red = retVal[k++];
                        System.Drawing.Color c = new System.Drawing.Color();
                        c = System.Drawing.Color.FromArgb(alpha, red, green, blue);
                        b.SetPixel(i, j, c);
                    }
                }

                // First use of an Emgu CV type; this is where the TypeInitializationException shows up
                Image<Bgr, Byte> temp = new Image<Bgr, byte>(b);
                this.ibOriginal.Image = temp;

                this.sensor.AllFramesReady += this.sensor_AllFramesReady;

                try
                {
                    this.sensor.Start();
                }
                catch (IOException)
                {
                    this.sensor = null;
                }
            }
        }

        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            blobCount = 0;
            BitmapSource depthBmp = null;
            Image<Bgr, Byte> openCVImg;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {
                        blobCount = 0;

                        if (colorFrame != null)
                        {
                            // Convert the Kinect color frame into an Emgu CV image
                            byte[] pixels = new byte[colorFrame.PixelDataLength];
                            colorFrame.CopyPixelDataTo(pixels);

                            int stride = colorFrame.Width * 4;
                            BitmapSource color = BitmapImage.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);

                            openCVImg = new Image<Bgr, byte>(color.ToBitmap());
                        }
                        else
                        {
                            return;
                        }

                        Image<Gray, byte> gray_image;
                        using (MemStorage stor = new MemStorage())
                        {
                            // Threshold, smooth, then run a Hough circle detection
                            gray_image = openCVImg.InRange(new Bgr(0, 0, 150), new Bgr(200, 200, 255));
                            gray_image = gray_image.SmoothGaussian(9);

                            CircleF[] circles = gray_image.HoughCircles(new Gray(100),
                                                                         new Gray(50),
                                                                         2,
                                                                         gray_image.Height / 4,
                                                                         10,
                                                                         400)[0];

                            foreach (CircleF circle in circles)
                            {
                                CvInvoke.cvCircle(openCVImg,
                                                  new System.Drawing.Point(Convert.ToInt32(circle.Center.X), Convert.ToInt32(circle.Center.Y)),
                                                  3,
                                                  new MCvScalar(0, 255, 0),
                                                  -1,
                                                  LINE_TYPE.CV_AA,
                                                  0);

                                openCVImg.Draw(circle,
                                               new Bgr(System.Drawing.Color.Red),
                                               3);
                            }
                        }

                        ibOriginal.Image = openCVImg;
                        ibProcessed.Image = gray_image;
                    }
                }
            }
        }
    }
}
using System;
using System.Globalization;
using System.IO;
using System.Windows;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using Microsoft.Kinect;
using System.ComponentModel;
using System.Runtime.InteropServices;
using Emgu.CV;
namespace VideoSurveilance
{
    public static class Helper
    {
        private const int MaxDepthDistance = 4000;
        private const int MinDepthDistance = 850;
        private const int MaxDepthDistanceOffset = 3150;

        public static BitmapSource SliceDepthImage(this DepthImageFrame image, int min = 20, int max = 1000)
        {
            int width = image.Width;
            int height = image.Height;

            //var depthFrame = image.Image.Bits;

            short[] rawDepthData = new short[image.PixelDataLength];
            image.CopyPixelDataTo(rawDepthData);

            var pixels = new byte[height * width * 4];

            const int BlueIndex = 0;
            const int GreenIndex = 1;
            const int RedIndex = 2;

            for (int depthIndex = 0, colorIndex = 0;
                 depthIndex < rawDepthData.Length && colorIndex < pixels.Length;
                 depthIndex++, colorIndex += 4)
            {
                // Calculate the distance represented by the two depth bytes
                int depth = rawDepthData[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // Map the distance to an intensity that can be represented in RGB
                var intensity = CalculateIntensityFromDistance(depth);

                if (depth > min && depth < max)
                {
                    // Apply the intensity to the color channels
                    pixels[colorIndex + BlueIndex] = intensity;  //blue
                    pixels[colorIndex + GreenIndex] = intensity; //green
                    pixels[colorIndex + RedIndex] = intensity;   //red
                }
            }

            return BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixels, width * 4);
        }

        public static byte CalculateIntensityFromDistance(int distance)
        {
            // This will map a distance value to a 0 - 255 range
            // for the purposes of applying the resulting value
            // to RGB pixels.
            int newMax = distance - MinDepthDistance;
            if (newMax > 0)
                return (byte)(255 - (255 * newMax
                    / (MaxDepthDistanceOffset)));
            else
                return (byte)255;
        }

        public static System.Drawing.Bitmap ToBitmap(this BitmapSource bitmapsource)
        {
            System.Drawing.Bitmap bitmap;
            using (var outStream = new MemoryStream())
            {
                // from System.Media.BitmapImage to System.Drawing.Bitmap
                BitmapEncoder enc = new BmpBitmapEncoder();
                enc.Frames.Add(BitmapFrame.Create(bitmapsource));
                enc.Save(outStream);
                bitmap = new System.Drawing.Bitmap(outStream);
                return bitmap;
            }
        }

        [DllImport("gdi32")]
        private static extern int DeleteObject(IntPtr o);

        /// <summary>
        /// Convert an IImage to a WPF BitmapSource. The result can be used in the Set Property of Image.Source
        /// </summary>
        /// <param name="image">The Emgu CV Image</param>
        /// <returns>The equivalent BitmapSource</returns>
        public static BitmapSource ToBitmapSource(IImage image)
        {
            using (System.Drawing.Bitmap source = image.Bitmap)
            {
                IntPtr ptr = source.GetHbitmap(); //obtain the Hbitmap

                BitmapSource bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
                    ptr,
                    IntPtr.Zero,
                    Int32Rect.Empty,
                    System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());

                DeleteObject(ptr); //release the HBitmap
                return bs;
            }
        }
    }
}
I sincerely thank anyone who even tries to help me, and I hope that anyone with a similar problem can benefit from this long question.
Answer 0 (score: 3)
Found the 'brute force' solution:
You have to put the "cvextern.dll" file into your build directory (x64, Debug, Release, or whatever) manually.
That did it. I can't believe I spent a whole day trying to figure this out.
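Not part of the original answer, but a minimal sketch of how you could confirm that fix: check at startup that the native DLL really ended up next to the executable, before any Emgu type is touched. The file name cvextern.dll and the directory check are assumptions based on the answer above, not something from the question.

using System;
using System.IO;

static class NativeDllCheck
{
    // Returns true if cvextern.dll sits in the same directory as the executable.
    public static bool CvExternPresent()
    {
        string exeDir = AppDomain.CurrentDomain.BaseDirectory;
        string path = Path.Combine(exeDir, "cvextern.dll");
        bool found = File.Exists(path);

        Console.WriteLine(found
            ? "cvextern.dll found at " + path
            : "cvextern.dll missing from " + exeDir);

        return found;
    }
}

Calling NativeDllCheck.CvExternPresent() at the top of Main (or in the form's Load handler) tells you right away whether the manual copy worked, before the TypeInitializationException has a chance to appear.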
Answer 1 (score: 0)
As Oliver said, your problem is very similar to this one: EmguCV TypeInitializationException. The exception is thrown because some DLLs, such as opencv_highgui220.dll, cannot be accessed. In CvInvoke you will find the DllImports; maybe you should set a breakpoint there and watch where it comes from (a small sketch of that idea follows below).
I assume you have already read this on the Emgu website.
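Not part of the original answer, just a hedged illustration of the debugging idea above: force Emgu's static initializer to run early inside a try/catch and inspect the inner exception of the TypeInitializationException, which usually names the native DLL that could not be loaded. The Image<Bgr, byte> construction is only one arbitrary way to trigger that load.

using System;
using Emgu.CV;
using Emgu.CV.Structure;

static class EmguLoadProbe
{
    public static void Probe()
    {
        try
        {
            // Any first use of an Emgu type runs the static constructor that
            // loads the unmanaged OpenCV DLLs (cvextern.dll, opencv_*.dll).
            using (var img = new Image<Bgr, byte>(1, 1))
            {
                Console.WriteLine("Emgu CV native DLLs loaded successfully.");
            }
        }
        catch (TypeInitializationException ex)
        {
            // The innermost exception (often a DllNotFoundException, or a
            // BadImageFormatException on an x86/x64 mismatch) points at the
            // DLL that failed to load.
            Console.WriteLine(ex.GetBaseException());
        }
    }
}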