我最初不是C++编码器,因此大部分代码都让我感到头疼。但是,我知道将轮廓添加到以下代码中应该相当简单:
//------------------------------------------------------------------------------
// <copyright file="CoordinateMapperHelper.cpp" company="Microsoft">
// Copyright (c) Microsoft Corporation. All rights reserved.
// </copyright>
//------------------------------------------------------------------------------
// CoordinateMapperHelper.cpp
#include "stdafx.h"
#include "pch.h"
#include "CoordinateMapperHelper.h"
#include <robuffer.h>
#include <math.h>
#include <limits>
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
using namespace KinectImageProcessor;
using namespace Platform;
using namespace Windows::Storage::Streams;
using namespace WindowsPreview::Kinect;
using namespace cv;
CoordinateMapperHelper::CoordinateMapperHelper() :
    m_depthPoints(nullptr),
    m_coordinateMapper(nullptr),
    m_colorHeight(0),
    m_colorWidth(0),
    m_depthHeight(0),
    m_depthWidth(0)
{
    // Grab the default Kinect sensor and cache its coordinate mapper.
    KinectSensor^ kinectSensor = WindowsPreview::Kinect::KinectSensor::GetDefault();
    m_coordinateMapper = kinectSensor->CoordinateMapper;

    // Cache the depth frame dimensions.
    FrameDescription^ depthDescription = kinectSensor->DepthFrameSource->FrameDescription;
    m_depthWidth = depthDescription->Width;
    m_depthHeight = depthDescription->Height;

    // Cache the color frame dimensions for the RGBA pixel format.
    FrameDescription^ rgbaDescription = kinectSensor->ColorFrameSource->CreateFrameDescription(ColorImageFormat::Rgba);
    m_colorWidth = rgbaDescription->Width;
    m_colorHeight = rgbaDescription->Height;

    // One depth-space point per color pixel, filled later by MapColorFrameToDepthSpace.
    m_depthPoints = ref new Array<DepthSpacePoint>(m_colorWidth * m_colorHeight);
}
// Builds the player-silhouette output frame: for every color-space pixel that
// maps onto a tracked body in the body-index frame, writes a solid fill value
// into outputDataBuffer. Returns false on null input or COM buffer-access
// failure, true otherwise.
//
// depthDataArray      - one UINT16 depth sample per depth pixel.
// colorDataBuffer     - raw color frame bytes (accessed below, but the actual
//                       color copy is currently commented out).
// bodyIndexframeArray - one byte per depth pixel; 0xff means "no player".
// outputDataBuffer    - destination; treated as 32-bit pixels, one per color
//                       pixel. NOTE(review): Capacity is never validated
//                       against m_colorWidth * m_colorHeight * 4 — confirm at
//                       the caller.
bool CoordinateMapperHelper::ProcessMultiFrameBufferData(
_In_ const Platform::Array<UINT16>^ depthDataArray,
_In_ Windows::Storage::Streams::IBuffer^ colorDataBuffer,
_In_ const Platform::Array<UINT8>^ bodyIndexframeArray,
_In_ Windows::Storage::Streams::IBuffer^ outputDataBuffer)
{
// Reject the whole frame set if any component is missing.
if (depthDataArray == nullptr || colorDataBuffer == nullptr || bodyIndexframeArray == nullptr || outputDataBuffer == nullptr)
{
return false;
}
// map color coordinates to depth coordinates
// (fills m_depthPoints with one DepthSpacePoint per color pixel)
m_coordinateMapper->MapColorFrameToDepthSpace(depthDataArray, m_depthPoints);
// Get access to color buffer
// (IBuffer -> IBufferByteAccess gives a raw byte pointer into the WinRT buffer)
Microsoft::WRL::ComPtr<IBufferByteAccess> spColorBufferByteAccess;
Microsoft::WRL::ComPtr<IInspectable> spColorDataBuffer = reinterpret_cast<IInspectable*>(colorDataBuffer);
HRESULT hr = spColorDataBuffer.As(&spColorBufferByteAccess);
if (FAILED(hr))
{
return false;
}
// Retrieve the color buffer data.
int* pColorData = nullptr;
byte* pColorByteData = nullptr;
hr = spColorBufferByteAccess->Buffer(&pColorByteData);
if (FAILED(hr))
{
return false;
}
// Reinterpret the color bytes as 32-bit pixels.
// NOTE(review): pColorData is only consumed by the commented-out copy below.
pColorData = (int*)pColorByteData;
// Get access to output buffer
Microsoft::WRL::ComPtr<IBufferByteAccess> spOutputBufferByteAccess;
Microsoft::WRL::ComPtr<IInspectable> spOutputDataBuffer = reinterpret_cast<IInspectable*>(outputDataBuffer);
hr = spOutputDataBuffer.As(&spOutputBufferByteAccess);
if (FAILED(hr))
{
return false;
}
// Retrieve the output buffer data.
int* pOutputData = nullptr;
byte* pOutputDataByte = nullptr;
hr = spOutputBufferByteAccess->Buffer(&pOutputDataByte);
if (FAILED(hr))
{
return false;
}
// Treat the output bytes as 32-bit pixels as well.
pOutputData = (int*)pOutputDataByte;
DepthSpacePoint* pDepthPoints = m_depthPoints->Data;
byte* pBodyIndexFrameArray = bodyIndexframeArray->Data;
// Clear the whole output frame first; only player pixels are written below.
// (Capacity is a byte count, which matches ZeroMemory's size parameter.)
ZeroMemory(pOutputData, outputDataBuffer->Capacity);
// loop over each color pixel
int numColorPixels = m_colorWidth * m_colorHeight;
for (int colorIndex = 0; colorIndex < numColorPixels; ++colorIndex)
{
DepthSpacePoint p = pDepthPoints[colorIndex];
// Infinity means invalid point so we can skip processing on it
// (the mapper marks color pixels with no depth correspondence as -inf)
if (p.X != -std::numeric_limits<float>::infinity() && p.Y != -std::numeric_limits<float>::infinity())
{
// Round to the nearest depth pixel.
int depthX = static_cast<int>(p.X + 0.5f);
int depthY = static_cast<int>(p.Y + 0.5f);
// Guard against mapped points just outside the depth frame.
if ((depthX >= 0 && depthX < m_depthWidth) && (depthY >= 0 && depthY < m_depthHeight))
{
// 0xff in the body-index frame means "no tracked body" at this pixel.
BYTE bodyIndexPixel = pBodyIndexFrameArray[depthX + (depthY * m_depthWidth)];
// if we're tracking a player for the current pixel, copy the pixel from the color data
if (bodyIndexPixel != 0xff)
{
// pOutputData[colorIndex] = pColorData[colorIndex];
// Writes a fixed fill value instead of the real color, producing a
// solid silhouette (-0xffffff == 0xFF000001 as a 32-bit pixel).
pOutputData[colorIndex] = -0xffffff;
}
}
}
}
// Report the output as the same size as the color frame.
outputDataBuffer->Length = colorDataBuffer->Length;
return true;
}
我希望ProcessMultiFrameBufferData的输出应用轮廓。图像是一个人的silhouette(剪影),但我想尽可能地平滑边缘。
谢谢!
我在C#中有代码,但我在移植它时遇到了问题。
using Emgu.CV;
using Emgu.CV.Structure;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Media.Effects;
namespace Microsoft.Samples.Kinect.BodyIndexBasics
{
/// <summary>
/// Class responsible for extracting out the contours of an image.
/// </summary>
internal class FindContours
{
/// <summary>
/// Method used to process the image and set the output result images.
/// </summary>
/// <param name="colorImage">Source color image.</param>
/// <param name="thresholdValue">Value used for thresholding.</param>
/// <param name="processedGray">Resulting gray image.</param>
/// <param name="processedColor">Resulting color image.</param>
public void IdentifyContours(Bitmap colorImage, int thresholdValue, bool invert, out Bitmap processedGray, out Bitmap processedColor)
{
#region Conversion To grayscale
// NOTE(review): myBlurEffect is created but never used anywhere in this
// method; WPF BitmapBlurEffect does not participate in the Emgu pipeline.
BlurBitmapEffect myBlurEffect = new BlurBitmapEffect();
// Emgu converts the color bitmap to single-channel gray on construction.
Image<Gray, byte> grayImage = new Image<Gray, byte>(colorImage);
// Blank color canvas of the same size; contours are drawn onto this.
Image<Bgr, byte> color = new Image<Bgr, byte>(new Bitmap(colorImage.Width, colorImage.Height));
#endregion Conversion To grayscale
#region Image normalization and inversion (if required)
// Binarize: pixels above thresholdValue become 255, the rest 0.
grayImage = grayImage.ThresholdBinary(new Gray(thresholdValue), new Gray(255));
if (invert)
{
grayImage._Not();
}
#endregion Image normalization and inversion (if required)
#region Extracting the Contours
using (MemStorage storage = new MemStorage())
{
// HNext walks the sibling chain of top-level contours.
for (Contour<Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
{
// Simplify with an epsilon of 1.5% of the contour perimeter —
// this is what smooths the silhouette edge.
Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
// Ignore small noise blobs (bounding box narrower than 20 px).
if (currentContour.BoundingRectangle.Width > 20)
{
// NOTE(review): the unsimplified 'contours' is drawn here, not
// 'currentContour'; args are (ext color, hole color, maxLevel=-1,
// thickness=5, ...) — confirm against the Emgu 2.x signature.
CvInvoke.cvDrawContours(color, contours, new MCvScalar(255), new MCvScalar(255), -1, 5, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
}
}
}
#endregion Extracting the Contours
#region Asigning output
processedColor = color.ToBitmap();
processedGray = grayImage.ToBitmap();
#endregion Asigning output
}
}
}
更新:我正在使用以下代码作为尝试转换。
outputDataBuffer->Length = colorDataBuffer->Length;
IplImage * ovImage = NULL;
ovImage = cvCreateImage(cvSize(1920, 1080), 8, 4);
cvSetData(ovImage, pOutputData, ovImage->widthStep);
Mat img = cvarrToMat(ovImage);
Mat imgGray;
cvtColor(img, imgGray, CV_RGB2GRAY);
threshold(imgGray, imgGray, 128, 255, CV_THRESH_BINARY);
findContours(imgGray, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
contours.resize(contours.size());
for (size_t k = 0; k < contours.size(); k++)
{
approxPolyDP(Mat(contours[k]), contours[k], 5, true);
//drawContours(img, contours, k, Scalar(0, 255, 0), 2, CV_AA, hierarchy, abs(1));
if (contours[k].size() == 20){
++cTriangles;
drawContours(img, contours, k, Scalar(255, 0, 0), 2, CV_AA, hierarchy, abs(1));
}
}
答案 0(得分:0)
这是在第二个代码块中转换为C++的C#代码:
//.h file code:
using namespace Emgu::CV;
using namespace Emgu::CV::Structure;
// NOTE(review): this is a mechanical C#->C++ translation. Emgu.CV is a .NET
// wrapper library and System.Drawing::Bitmap is a .NET type — neither exists
// in native C++, so this declaration can only ever compile under C++/CLI
// with the Emgu assemblies referenced. For native C++ use cv::Mat and the
// OpenCV C++ API instead.
namespace Microsoft
{
namespace Samples
{
namespace Kinect
{
namespace BodyIndexBasics
{
/// <summary>
/// Class responsible for extracting out the contours of an image.
/// </summary>
class FindContours
{
/// <summary>
/// Method used to process the image and set the output result images.
/// </summary>
/// <param name="colorImage">Source color image.</param>
/// <param name="thresholdValue">Value used for thresholding.</param>
/// <param name="processedGray">Resulting gray image.</param>
/// <param name="processedColor">Resulting color image.</param>
public:
// Out-parameters are expressed as pointer-reference (Bitmap *&) in place of
// the C# 'out' keyword.
void IdentifyContours(Bitmap *colorImage, int thresholdValue, bool invert, Bitmap *&processedGray, Bitmap *&processedColor);
};
}
}
}
}
//.cpp file code:
using namespace Emgu::CV;
using namespace Emgu::CV::Structure;
// NOTE(review): like the header above, this body is pseudo-C++ produced by a
// mechanical C#->C++ translation and will not compile as native C++:
//   - Emgu::CV / Bitmap are .NET types (C++/CLI + Emgu assemblies required);
//   - 'finally' is not a C++ keyword (C++/CLI only; native C++ uses RAII);
//   - 'storage.Dispose()' uses '.' on a pointer — would need 'storage->';
//   - Image<Gray*, unsigned char> passes Gray* where C# used Gray;
//   - every raw 'new' here leaks (no delete, no smart pointers).
// It is kept verbatim as the answer posted it.
namespace Microsoft
{
namespace Samples
{
namespace Kinect
{
namespace BodyIndexBasics
{
void FindContours::IdentifyContours(Bitmap *colorImage, int thresholdValue, bool invert, Bitmap *&processedGray, Bitmap *&processedColor)
{
// NOTE(review): myBlurEffect is never used (carried over from the C# source).
BlurBitmapEffect *myBlurEffect = new BlurBitmapEffect();
Image<Gray*, unsigned char> *grayImage = new Image<Gray*, unsigned char>(colorImage);
Image<Bgr*, unsigned char> *color = new Image<Bgr*, unsigned char>(new Bitmap(colorImage->Width, colorImage->Height));
// Binarize: pixels above thresholdValue become 255, the rest 0.
grayImage = grayImage->ThresholdBinary(new Gray(thresholdValue), new Gray(255));
if (invert)
{
grayImage->_Not();
}
MemStorage *storage = new MemStorage();
try
{
// HNext walks the sibling chain of detected contours.
for (Contour<Point> contours = grayImage->FindContours(Emgu::CV::CvEnum::CHAIN_APPROX_METHOD::CV_CHAIN_APPROX_SIMPLE, Emgu::CV::CvEnum::RETR_TYPE::CV_RETR_TREE, storage); contours != nullptr; contours = contours->HNext)
{
// Simplify with an epsilon of 1.5% of the contour perimeter.
Contour<Point> *currentContour = contours->ApproxPoly(contours->Perimeter * 0.015, storage);
// Ignore small noise blobs (bounding box narrower than 20 px).
if (currentContour->BoundingRectangle->Width > 20)
{
CvInvoke::cvDrawContours(color, contours, new MCvScalar(255), new MCvScalar(255), -1, 5, Emgu::CV::CvEnum::LINE_TYPE::EIGHT_CONNECTED, Point(0, 0));
}
}
}
finally
{
if (storage != nullptr)
{
storage.Dispose();
}
}
processedColor = color->ToBitmap();
processedGray = grayImage->ToBitmap();
}
}
}
}
}