Microsoft Cognitive Face API - How to get Face Attributes from Kinect's video feed?

Asked: 2017-02-10 08:36:16

Tags: c# wpf microsoft-cognitive kinect-v2 face-api

I am exploring the Microsoft Cognitive Face API and I am very new to it. I am able to get face attributes from a simple image, but my question is: how can I get the face attributes of a person in a live video feed from Kinect, in WPF C#? It would be great if somebody could help me. Thanks in advance!

I have tried capturing a frame from the Kinect color feed every 2 seconds to a file location, then using that file path, converting it to a Stream, and passing it to the Face API functions, and that worked. Below is the code I tried.

using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Threading;
using Microsoft.Kinect;
using Microsoft.ProjectOxford.Face;
using Microsoft.ProjectOxford.Face.Contract;

namespace CognitiveFaceAPISample
{

    public partial class MainWindow : Window
    {
        private readonly IFaceServiceClient faceServiceClient = new FaceServiceClient("c2446f84b1eb486ca11e2f5d6e670878");
        KinectSensor ks;
        ColorFrameReader cfr;
        byte[] colorData;
        ColorImageFormat format;
        WriteableBitmap wbmp;
        BitmapSource bmpSource;
        int imageSerial;
        DispatcherTimer timer, timer2;
        string streamF = @"Frames\frame.jpg";

        public MainWindow()
        {
            InitializeComponent();
            ks = KinectSensor.GetDefault();
            ks.Open();
            var fd = ks.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
            uint frameSize = fd.BytesPerPixel * fd.LengthInPixels;
            colorData = new byte[frameSize];
            format = ColorImageFormat.Bgra;
            imageSerial = 0;

            cfr = ks.ColorFrameSource.OpenReader();
            cfr.FrameArrived += cfr_FrameArrived;
        }

        void cfr_FrameArrived(object sender, ColorFrameArrivedEventArgs e)
        {
            if (e.FrameReference == null) return;

            using (ColorFrame cf = e.FrameReference.AcquireFrame())
            {
                if (cf == null) return;
                cf.CopyConvertedFrameDataToArray(colorData, format);
                var fd = cf.FrameDescription;

                // Creating BitmapSource
                var bytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel) / 8;
                var stride = bytesPerPixel * cf.FrameDescription.Width;

                bmpSource = BitmapSource.Create(fd.Width, fd.Height, 96.0, 96.0, PixelFormats.Bgr32, null, colorData, stride);

                // WritableBitmap to show on UI
                wbmp = new WriteableBitmap(bmpSource);
                FacePhoto.Source = wbmp;          

            }
        }

        private void SaveImage(BitmapSource image)
        {
            try
            {
                // FileMode.Create truncates the previous frame; OpenOrCreate would leave
                // stale bytes behind whenever the new JPEG is smaller than the old one.
                using (var stream = new FileStream(@"Frames\frame.jpg", FileMode.Create))
                {
                    JpegBitmapEncoder encoder = new JpegBitmapEncoder();
                    encoder.FlipHorizontal = true;
                    encoder.FlipVertical = false;
                    encoder.QualityLevel = 30;
                    encoder.Frames.Add(BitmapFrame.Create(image));
                    encoder.Save(stream);
                }
            }
            catch (Exception)
            {
                // Swallow transient I/O errors (e.g. the file is still open for reading).
            }
        }       


        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            timer = new DispatcherTimer { Interval = TimeSpan.FromSeconds(2) };
            timer.Tick += Timer_Tick;
            timer.Start();
            timer2 = new DispatcherTimer { Interval = TimeSpan.FromSeconds(5) };
            timer2.Tick += Timer2_Tick;
            timer2.Start();
        }
        private void Timer_Tick(object sender, EventArgs e)
        {
            // Guard against the timer firing before the first color frame arrives.
            if (bmpSource != null)
            {
                SaveImage(bmpSource);
            }
        }
        private async void Timer2_Tick(object sender, EventArgs e)
        {
            Title = "Detecting...";
            FaceRectangle[] faceRects = await UploadAndDetectFaces(streamF);
            Face[] faceAttributes = await UploadAndDetectFaceAttributes(streamF);
            Title = String.Format("Detection Finished. {0} face(s) detected", faceRects.Length);

            if (faceRects.Length > 0)
            {
                DrawingVisual visual = new DrawingVisual();
                DrawingContext drawingContext = visual.RenderOpen();
                drawingContext.DrawImage(bmpSource,
                    new Rect(0, 0, bmpSource.Width, bmpSource.Height));
                double dpi = bmpSource.DpiX;
                double resizeFactor = 96 / dpi;

                foreach (var faceRect in faceRects)
                {
                    drawingContext.DrawRectangle(
                        Brushes.Transparent,
                        new Pen(Brushes.Red, 2),
                        new Rect(
                            faceRect.Left * resizeFactor,
                            faceRect.Top * resizeFactor,
                            faceRect.Width * resizeFactor,
                            faceRect.Height * resizeFactor
                            )
                    );
                }

                drawingContext.Close();
                RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
                    (int)(bmpSource.PixelWidth * resizeFactor),
                    (int)(bmpSource.PixelHeight * resizeFactor),
                    96,
                    96,
                    PixelFormats.Pbgra32);
                faceWithRectBitmap.Render(visual);
                FacePhoto.Source = faceWithRectBitmap;
            }

            if (faceAttributes.Length > 0)
            {
                foreach (var faceAttr in faceAttributes)
                {
                    Label lb = new Label();
                    //Canvas.SetLeft(lb, lb.Width);
                    lb.Content = faceAttr.FaceAttributes.Gender;// + " " + faceAttr.Gender + " " + faceAttr.FacialHair + " " + faceAttr.Glasses + " " + faceAttr.HeadPose + " " + faceAttr.Smile;
                    lb.FontSize = 50;
                    lb.Width = 200;
                    lb.Height = 100;
                    stack.Children.Add(lb);
                }
            }
        }

        private async Task<FaceRectangle[]> UploadAndDetectFaces(string imageFilePath)
        {
            try
            {
                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    var faces = await faceServiceClient.DetectAsync(imageFilePath);
                    var faceRects = faces.Select(face => face.FaceRectangle);
                    var faceAttrib = faces.Select(face => face.FaceAttributes);
                    return faceRects.ToArray();

                }
            }
            catch (Exception)
            {
                return new FaceRectangle[0];
            }
        }

        private async Task<Face[]> UploadAndDetectFaceAttributes(string imageFilePath)
        {
            try
            {
                using (Stream imageFileStream = File.Open(imageFilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                {
                    var faces = await faceServiceClient.DetectAsync(imageFileStream, true, true, new FaceAttributeType[] { FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile, FaceAttributeType.Glasses, FaceAttributeType.HeadPose, FaceAttributeType.FacialHair });

                    return faces.ToArray();

                }
            }
            catch (Exception)
            {
                return new Face[0];
            }
        }
    }
}

The above code works well. However, I want to convert each frame of the Kinect color feed directly to a Stream. I have searched for how to do this, but nothing has worked for me. If anyone can help me, that would be great. Thanks!

1 Answer:

Answer 0 (score: 1)

Rather than saving the frame to a file in SaveImage, persist it to a MemoryStream instead, rewind that stream (by calling Position = 0), and then send the stream to DetectAsync().
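A minimal sketch of that in-memory approach, reusing the encoder settings from SaveImage above; the helper name DetectFacesInMemoryAsync is made up for illustration, and the attribute list mirrors the one in UploadAndDetectFaceAttributes:

private async Task<Face[]> DetectFacesInMemoryAsync(BitmapSource image)
{
    using (var stream = new MemoryStream())
    {
        // Encode the current frame as a JPEG directly into memory.
        var encoder = new JpegBitmapEncoder { QualityLevel = 30 };
        encoder.Frames.Add(BitmapFrame.Create(image));
        encoder.Save(stream);

        // Rewind so DetectAsync reads the JPEG from the beginning.
        stream.Position = 0;

        return await faceServiceClient.DetectAsync(stream, true, false,
            new FaceAttributeType[] {
                FaceAttributeType.Gender, FaceAttributeType.Age,
                FaceAttributeType.Smile, FaceAttributeType.Glasses,
                FaceAttributeType.HeadPose, FaceAttributeType.FacialHair });
    }
}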

Also note that in UploadAndDetectFaces you should send imageFileStream, not imageFilePath, to DetectAsync(). In any case, you probably don't want to call both UploadAndDetectFaces and UploadAndDetectFaceAttributes, since that just doubles your work (and your quota/rate-limit hit).
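Since each Face returned by DetectAsync already carries both its FaceRectangle and the requested FaceAttributes, one call per frame is enough. A sketch of how Timer2_Tick could consume a single result set, assuming the hypothetical helper above:

// One round-trip per frame: rectangles and attributes come back together.
Face[] faces = await DetectFacesInMemoryAsync(bmpSource);
Title = String.Format("Detection Finished. {0} face(s) detected", faces.Length);

foreach (var face in faces)
{
    FaceRectangle rect = face.FaceRectangle;   // drive DrawRectangle from this
    var attributes = face.FaceAttributes;      // Gender, Age, Smile, ...
}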