using System;
using System.Drawing;
using System.IO;
using Emgu.CV;
using Emgu.CV.Structure;
using UnityEngine;

public class testEmguCV : MonoBehaviour
{
    private Capture capture;

    void Start()
    {
        capture = new Capture();
    }

    void Update()
    {
        Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
        Bitmap bitmapCurrentFrame = currentFrame.ToBitmap();
        MemoryStream m = new MemoryStream();
        bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
        Texture2D camera = new Texture2D(400, 400);
        if (currentFrame != null)
        {
            camera.LoadImage(m.ToArray());
            renderer.material.mainTexture = camera;
        }
    }
}
I am using the code above to convert the camera feed from an EmguCV Capture into a Texture2D, but I am running into a problem with bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
It gives the following error:
ArgumentNullException: Argument cannot be null. Parameter name: encoder
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageCodecInfo encoder, System.Drawing.Imaging.EncoderParameters encoderParams)
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageFormat format)
(wrapper remoting-invoke-with-check) System.Drawing.Image:Save (System.IO.Stream,System.Drawing.Imaging.ImageFormat)
WebcamUsingEmgucv.Update () (at Assets/WebcamUsingEmgucv.cs:51)
After several hours of thinking and searching I still have no idea what is going on. Please help.
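A likely cause, which is not stated in the post: a Bitmap created in memory reports MemoryBmp as its RawFormat, and no encoder is registered for that format, so Save ends up passing a null encoder down the stack. Saving with an explicit format sidesteps this, and Texture2D.LoadImage only decodes PNG or JPG data anyway. A minimal sketch of that change (the choice of ImageFormat.Png is an assumption):

    using (MemoryStream m = new MemoryStream())
    {
        // Save with an explicit encoder instead of RawFormat (MemoryBmp has no encoder).
        bitmapCurrentFrame.Save(m, System.Drawing.Imaging.ImageFormat.Png);
        camera.LoadImage(m.ToArray());
    }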
Answer 0 (score: 0)
I used your example in our project, thank you! But I modified it to:
void Update()
{
    if (capture == null)
    {
        Debug.LogError("Capture is null");
        return;
    }

    Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
    // Check the frame before touching it, otherwise the Bitmap access below
    // would throw on a dropped frame.
    if (currentFrame != null)
    {
        MemoryStream m = new MemoryStream();
        currentFrame.Bitmap.Save(m, currentFrame.Bitmap.RawFormat);
        Texture2D camera = new Texture2D(400, 400);
        camera.LoadImage(m.ToArray());
        renderer.material.mainTexture = camera;
    }
}
It works! The FPS averages around 30-35. Good luck!
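A small refinement that is not part of the answer: since Texture2D.LoadImage resizes the texture to match the decoded image, the texture can be created once in Start and reused instead of allocating a new 400x400 texture every frame. A sketch:

    private Capture capture;
    private Texture2D camera;

    void Start()
    {
        capture = new Capture();
        camera = new Texture2D(2, 2);   // LoadImage resizes this to the frame size
    }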
Answer 1 (score: -1)
Try this: https://github.com/neutmute/emgucv/blob/3ceb85cba71cf957d5e31ae0a70da4bbf746d0e8/Emgu.CV/PInvoke/Unity/TextureConvert.cs — it has something like this:
public static Texture2D ImageToTexture2D<TColor, TDepth>(Image<TColor, TDepth> image, bool correctForVerticleFlip)
    where TColor : struct, IColor
    where TDepth : new()
{
    Size size = image.Size;

    if (typeof(TColor) == typeof(Rgb) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);
        byte[] data = new byte[size.Width * size.Height * 3];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
        {
            rgb.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgb, rgb, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
    else //if (typeof(TColor) == typeof(Rgba) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
        byte[] data = new byte[size.Width * size.Height * 4];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
        {
            rgba.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgba, rgba, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
    //return null;
}
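A minimal usage sketch (the capture field, the conversion to Rgb bytes, and assigning to this object's renderer are assumptions; the linked file only provides the conversion method):

    void Update()
    {
        // Convert the frame to Rgb bytes so it hits the RGB24 branch above.
        using (Image<Rgb, byte> rgb = capture.QueryFrame().Convert<Rgb, byte>())
        {
            Texture2D tex = TextureConvert.ImageToTexture2D(rgb, true);
            if (renderer.material.mainTexture != null)
                Destroy(renderer.material.mainTexture);   // release the previous frame's texture
            renderer.material.mainTexture = tex;
        }
    }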
If you don't want to use interop, you can also use something like cameraframe.Convert<Rgb,byte>().Data.Cast<byte>().ToArray<byte>() and use that instead of the part that uses interop.
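Roughly, that expression flattens the image's pixel data into a byte array that can be uploaded directly. A sketch of that path (cameraframe, the texture size, and the RGB24 format are assumptions, and the Cast call needs using System.Linq):

    byte[] data = cameraframe.Convert<Rgb, byte>().Data.Cast<byte>().ToArray();
    Texture2D texture = new Texture2D(cameraframe.Width, cameraframe.Height, TextureFormat.RGB24, false);
    texture.LoadRawTextureData(data);   // raw bytes, no PNG/JPG round trip
    texture.Apply();
    // Note: unlike the interop version above, nothing flips the image vertically here.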
Both solutions work for me. Just remember to destroy the texture before replacing it; before I did that I had memory-leak problems.
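A sketch of that cleanup (the ApplyFrame helper name is hypothetical, not from the answer):

    void ApplyFrame(Texture2D newFrame)
    {
        // Destroy the texture currently on the material before swapping in the new one,
        // otherwise every frame leaks one Texture2D.
        Texture2D old = renderer.material.mainTexture as Texture2D;
        if (old != null)
            Destroy(old);
        renderer.material.mainTexture = newFrame;
    }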