I'm trying to write a python wrapper for some C++ code that uses OpenCV, but I'm having difficulty returning the result, an OpenCV C++ Mat object, to the python interpreter.
I looked at OpenCV's source code and found that the file cv2.cpp has conversion functions to convert back and forth between PyObject* and OpenCV's Mat. I used those conversion functions, but got a segmentation fault when I tried to use them.
I basically need some advice/sample code/online references on how to interface python and C++ code that uses OpenCV, specifically on returning OpenCV's C++ Mat to the python interpreter, or perhaps suggestions on how/where to start investigating the cause of the segmentation fault.
Currently I am using Boost Python to wrap the code.
Thanks in advance for any replies.
Relevant code:
// This is the function that is giving the segmentation fault.
PyObject* ABC::doSomething(PyObject* image)
{
    Mat m;
    pyopencv_to(image, m); // This line gives segmentation fault.

    // Some code to create cppObj from CPP library that uses OpenCV
    cv::Mat processedImage = cppObj->align(m);

    return pyopencv_from(processedImage);
}
The conversion functions are taken from OpenCV's source. The conversion code gives the segmentation fault at the line commented with "if(!PyArray_Check(o)) ...".
static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>", bool allowND=true)
{
    if(!o || o == Py_None)
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }

    if( !PyArray_Check(o) ) // Segmentation fault inside PyArray_Check(o)
    {
        failmsg("%s is not a numpy array", name);
        return false;
    }

    int typenum = PyArray_TYPE(o);
    int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;

    if( type < 0 )
    {
        failmsg("%s data type = %d is not supported", name, typenum);
        return false;
    }

    int ndims = PyArray_NDIM(o);
    if(ndims >= CV_MAX_DIM)
    {
        failmsg("%s dimensionality (=%d) is too high", name, ndims);
        return false;
    }

    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(o);
    const npy_intp* _strides = PyArray_STRIDES(o);
    bool transposed = false;

    for(int i = 0; i < ndims; i++)
    {
        size[i] = (int)_sizes[i];
        step[i] = (size_t)_strides[i];
    }

    if( ndims == 0 || step[ndims-1] > elemsize ) {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }

    if( ndims >= 2 && step[0] < step[1] )
    {
        std::swap(size[0], size[1]);
        std::swap(step[0], step[1]);
        transposed = true;
    }

    if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }

    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", name);
        return false;
    }

    m = Mat(ndims, size, type, PyArray_DATA(o), step);

    if( m.data )
    {
        m.refcount = refcountFromPyObject(o);
        m.addref(); // protect the original numpy array from deallocation
                    // (since Mat destructor will decrement the reference counter)
    };
    m.allocator = &g_numpyAllocator;

    if( transposed )
    {
        Mat tmp;
        tmp.allocator = &g_numpyAllocator;
        transpose(m, tmp);
        m = tmp;
    }
    return true;
}

static PyObject* pyopencv_from(const Mat& m)
{
    if( !m.data )
        Py_RETURN_NONE;

    Mat temp, *p = (Mat*)&m;
    if(!p->refcount || p->allocator != &g_numpyAllocator)
    {
        temp.allocator = &g_numpyAllocator;
        m.copyTo(temp);
        p = &temp;
    }
    p->addref();
    return pyObjectFromRefcount(p->refcount);
}
My python test program:
import pysomemodule # My python wrapped library.
import cv2

def main():
    myobj = pysomemodule.ABC("faces.train") # Create python object. This works.
    image = cv2.imread('61.jpg')
    processedImage = myobj.doSomething(image)
    cv2.imshow("test", processedImage)
    cv2.waitKey()

if __name__ == "__main__":
    main()
Answer 0 (score: 33)
I solved the problem, so I thought I'd share it here for others who may run into the same issue.
Basically, to get rid of the segmentation fault, I needed to call NumPy's import_array() function.
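The reason is that the NumPy C API (everything prefixed PyArray_) is accessed through a function table that import_array() fills in; until it is called, PyArray_Check() dereferences a null pointer, which is exactly the segfault above. A minimal sketch of the fix, assuming a Python 2.x Boost::Python module like the one further down, is to call it once during module initialization (the full example below calls it in the ABC constructor instead, which works just as well):

#include <boost/python.hpp>
#include <numpy/ndarrayobject.h>

BOOST_PYTHON_MODULE(pysomemodule)
{
    // import_array() initializes NumPy's C API table (PyArray_API). Every
    // PyArray_* call, including PyArray_Check(), goes through that table,
    // so it must run before any of the conversion functions are used.
    import_array();

    // ... class_/def bindings go here (see the full module code below) ...
}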
A "high level" view of running C++ code from python goes like this:
Suppose you have a function foo(arg) in python which is a binding for some C++ function. When you call foo(myObj), there must be some code that converts the python object "myObj" to a form your C++ code can act on. This code is generally created semi-automatically with tools such as SWIG or Boost::Python. (I use Boost::Python in the examples below.)
Now, foo(arg) is a python binding for some C++ function. That C++ function receives a generic PyObject pointer as its argument, and you need C++ code to convert this PyObject pointer to an "equivalent" C++ object. In my case, my python code passes an OpenCV numpy array (an OpenCV image) as the function's argument. The "equivalent" form in C++ is an OpenCV C++ Mat object. OpenCV provides code in cv2.cpp (reproduced below) that converts the PyObject pointer (representing the numpy array) to a C++ Mat. Simpler data types such as int and string do not need user-written conversion functions, since Boost::Python converts them automatically.
After the PyObject pointer is converted to a suitable C++ form, the C++ code can act on it. When data has to be returned from C++ to python, an analogous situation arises: C++ code is needed to convert the C++ representation of the data into some form of PyObject. Boost::Python takes care of the rest, converting that PyObject to the corresponding python form. So when foo(arg) returns its result in python, it is in a form python can use directly. That's it.
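As a contrast to the Mat case handled below, here is a minimal sketch (module and function names are invented for illustration) of a binding where only simple types cross the boundary, so Boost::Python's built-in converters do all the work and no PyObject* handling is needed:

#include <boost/python.hpp>
#include <string>

// A plain C++ function using only types Boost::Python already knows how to convert.
std::string repeatGreeting(const std::string& name, int times)
{
    std::string out;
    for (int i = 0; i < times; ++i)
        out += "hello " + name + "\n";
    return out;
}

BOOST_PYTHON_MODULE(simpleexample)
{
    // std::string and int are converted automatically in both directions.
    boost::python::def("repeat_greeting", &repeatGreeting);
}

From python this would simply be called as simpleexample.repeat_greeting("world", 3), and the returned std::string arrives as an ordinary python string.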
The code below shows how to wrap a C++ class "ABC" and expose its method "doSomething", which takes a numpy array (an image) from python, converts it to OpenCV's C++ Mat, does some processing, converts the result to a PyObject*, and returns it to the python interpreter. You can expose as many functions/methods as you like (see the comments in the code below).
abc.hpp:
#ifndef ABC_HPP
#define ABC_HPP

#include <Python.h>
#include <string>

class ABC
{
public: // members used by the Boost::Python bindings must be accessible
    // Other declarations
    ABC();
    ABC(const std::string& someConfigFile);
    virtual ~ABC();

    PyObject* doSomething(PyObject* image); // We want our python code to be able to call this function to do some processing using OpenCV and return the result.

    // Other declarations
};

#endif
abc.cpp:
#include "abc.hpp"
#include "my_cpp_library.h" // This is what we want to make available in python. It uses OpenCV to perform some processing.
#include "numpy/ndarrayobject.h"
#include "opencv2/core/core.hpp"
// The following conversion functions are taken from OpenCV's cv2.cpp file inside modules/python/src2 folder.
static PyObject* opencv_error = 0;

static int failmsg(const char *fmt, ...)
{
    char str[1000];

    va_list ap;
    va_start(ap, fmt);
    vsnprintf(str, sizeof(str), fmt, ap);
    va_end(ap);

    PyErr_SetString(PyExc_TypeError, str);
    return 0;
}

class PyAllowThreads
{
public:
    PyAllowThreads() : _state(PyEval_SaveThread()) {}
    ~PyAllowThreads()
    {
        PyEval_RestoreThread(_state);
    }
private:
    PyThreadState* _state;
};

class PyEnsureGIL
{
public:
    PyEnsureGIL() : _state(PyGILState_Ensure()) {}
    ~PyEnsureGIL()
    {
        PyGILState_Release(_state);
    }
private:
    PyGILState_STATE _state;
};

#define ERRWRAP2(expr) \
try \
{ \
    PyAllowThreads allowThreads; \
    expr; \
} \
catch (const cv::Exception &e) \
{ \
    PyErr_SetString(opencv_error, e.what()); \
    return 0; \
}

using namespace cv;

static PyObject* failmsgp(const char *fmt, ...)
{
    char str[1000];

    va_list ap;
    va_start(ap, fmt);
    vsnprintf(str, sizeof(str), fmt, ap);
    va_end(ap);

    PyErr_SetString(PyExc_TypeError, str);
    return 0;
}

static size_t REFCOUNT_OFFSET = (size_t)&(((PyObject*)0)->ob_refcnt) +
    (0x12345678 != *(const size_t*)"\x78\x56\x34\x12\0\0\0\0\0")*sizeof(int);

static inline PyObject* pyObjectFromRefcount(const int* refcount)
{
    return (PyObject*)((size_t)refcount - REFCOUNT_OFFSET);
}

static inline int* refcountFromPyObject(const PyObject* obj)
{
    return (int*)((size_t)obj + REFCOUNT_OFFSET);
}

class NumpyAllocator : public MatAllocator
{
public:
    NumpyAllocator() {}
    ~NumpyAllocator() {}

    void allocate(int dims, const int* sizes, int type, int*& refcount,
                  uchar*& datastart, uchar*& data, size_t* step)
    {
        PyEnsureGIL gil;

        int depth = CV_MAT_DEPTH(type);
        int cn = CV_MAT_CN(type);
        const int f = (int)(sizeof(size_t)/8);
        int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
                      depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
                      depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
                      depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
        int i;

        npy_intp _sizes[CV_MAX_DIM+1];
        for( i = 0; i < dims; i++ )
        {
            _sizes[i] = sizes[i];
        }

        if( cn > 1 )
        {
            /*if( _sizes[dims-1] == 1 )
                _sizes[dims-1] = cn;
            else*/
                _sizes[dims++] = cn;
        }

        PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
        if(!o)
        {
            CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
        }
        refcount = refcountFromPyObject(o);

        npy_intp* _strides = PyArray_STRIDES(o);
        for( i = 0; i < dims - (cn > 1); i++ )
            step[i] = (size_t)_strides[i];
        datastart = data = (uchar*)PyArray_DATA(o);
    }

    void deallocate(int* refcount, uchar*, uchar*)
    {
        PyEnsureGIL gil;
        if( !refcount )
            return;
        PyObject* o = pyObjectFromRefcount(refcount);
        Py_INCREF(o);
        Py_DECREF(o);
    }
};

NumpyAllocator g_numpyAllocator;

enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>", bool allowND=true)
{
    //NumpyAllocator g_numpyAllocator;

    if(!o || o == Py_None)
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }

    if( !PyArray_Check(o) )
    {
        failmsg("%s is not a numpy array", name);
        return false;
    }

    int typenum = PyArray_TYPE(o);
    int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;

    if( type < 0 )
    {
        failmsg("%s data type = %d is not supported", name, typenum);
        return false;
    }

    int ndims = PyArray_NDIM(o);
    if(ndims >= CV_MAX_DIM)
    {
        failmsg("%s dimensionality (=%d) is too high", name, ndims);
        return false;
    }

    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(o);
    const npy_intp* _strides = PyArray_STRIDES(o);
    bool transposed = false;

    for(int i = 0; i < ndims; i++)
    {
        size[i] = (int)_sizes[i];
        step[i] = (size_t)_strides[i];
    }

    if( ndims == 0 || step[ndims-1] > elemsize ) {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }

    if( ndims >= 2 && step[0] < step[1] )
    {
        std::swap(size[0], size[1]);
        std::swap(step[0], step[1]);
        transposed = true;
    }

    if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }

    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", name);
        return false;
    }

    m = Mat(ndims, size, type, PyArray_DATA(o), step);

    if( m.data )
    {
        m.refcount = refcountFromPyObject(o);
        m.addref(); // protect the original numpy array from deallocation
                    // (since Mat destructor will decrement the reference counter)
    };
    m.allocator = &g_numpyAllocator;

    if( transposed )
    {
        Mat tmp;
        tmp.allocator = &g_numpyAllocator;
        transpose(m, tmp);
        m = tmp;
    }
    return true;
}

static PyObject* pyopencv_from(const Mat& m)
{
    if( !m.data )
        Py_RETURN_NONE;

    Mat temp, *p = (Mat*)&m;
    if(!p->refcount || p->allocator != &g_numpyAllocator)
    {
        temp.allocator = &g_numpyAllocator;
        m.copyTo(temp);
        p = &temp;
    }
    p->addref();
    return pyObjectFromRefcount(p->refcount);
}
ABC::ABC() {}
ABC::~ABC() {}

// Note the import_array() from NumPy must be called else you will experience segmentation faults.
ABC::ABC(const std::string &someConfigFile)
{
    // Initialization code. Possibly store someConfigFile etc.
    import_array(); // This is a function from NumPy that MUST be called.
    // Do other stuff
}

// The conversion functions above are taken from OpenCV. The following function is
// what we define to access the C++ code we are interested in.
PyObject* ABC::doSomething(PyObject* image)
{
    cv::Mat cvImage;
    pyopencv_to(image, cvImage); // From OpenCV's source

    MyCPPClass obj; // Some object from the C++ library.
    cv::Mat processedImage = obj.process(cvImage);

    return pyopencv_from(processedImage); // From OpenCV's source
}
The code to create the python module with Boost Python. I took this, and the Makefile below, from http://jayrambhia.wordpress.com/tag/boost/.
pysomemodule.cpp:
#include <string>
#include <boost/python.hpp>

#include "abc.hpp"

using namespace boost::python;

BOOST_PYTHON_MODULE(pysomemodule)
{
    class_<ABC>("ABC", init<const std::string &>())
        .def(init<const std::string &>())
        .def("doSomething", &ABC::doSomething) // doSomething is the method in class ABC you wish to expose. One line for each method (or function depending on how you structure your code). Note: You don't have to expose everything in the library, just the ones you wish to make available to python.
    ;
}
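If you want to expose more than one entry point, each one simply gets its own .def line. A sketch of what the module block could look like in that case (doSomethingElse and standaloneHelper are hypothetical names, not part of the code above):

BOOST_PYTHON_MODULE(pysomemodule)
{
    class_<ABC>("ABC", init<const std::string &>())
        .def("doSomething", &ABC::doSomething)
        .def("doSomethingElse", &ABC::doSomethingElse) // hypothetical extra method
    ;

    // A hypothetical free function is exposed the same way.
    def("standaloneHelper", &standaloneHelper);
}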
Finally, the Makefile (compiled successfully on Ubuntu, but it should work elsewhere with little or no adjustment).
PYTHON_VERSION = 2.7
PYTHON_INCLUDE = /usr/include/python$(PYTHON_VERSION)

# location of the Boost Python include files and library
BOOST_INC = /usr/local/include/boost
BOOST_LIB = /usr/local/lib

OPENCV_LIB = `pkg-config --libs opencv`
OPENCV_CFLAGS = `pkg-config --cflags opencv`

MY_CPP_LIB = lib_my_cpp_library.so

TARGET = pysomemodule
SRC = pysomemodule.cpp abc.cpp
OBJ = pysomemodule.o abc.o

# Note: the indented command lines below must begin with a tab character.
$(TARGET).so: $(OBJ)
	g++ -shared $(OBJ) -L$(BOOST_LIB) -lboost_python -L/usr/lib/python$(PYTHON_VERSION)/config -lpython$(PYTHON_VERSION) -o $(TARGET).so $(OPENCV_LIB) $(MY_CPP_LIB)

$(OBJ): $(SRC)
	g++ -I$(PYTHON_INCLUDE) -I$(BOOST_INC) $(OPENCV_CFLAGS) -fPIC -c $(SRC)

clean:
	rm -f $(OBJ)
	rm -f $(TARGET).so
After the library compiles successfully, you should have a file "pysomemodule.so" in the directory. Put this lib file somewhere accessible to your python interpreter. You can then import the module and create an instance of the "ABC" class above as follows:
import pysomemodule
foo = pysomemodule.ABC("config.txt") # This will create an instance of ABC
Now, given an OpenCV numpy array image, we can call the C++ function with:
processedImage = foo.doSomething(image) # Where the argument "image" is an OpenCV numpy image.
Note that you will need Boost Python, the NumPy dev, and the Python dev libraries to create the bindings.
The NumPy documentation in the two links below was particularly useful for understanding the methods used in the conversion code and why import_array() must be called. In particular, the official NumPy doc helps in making sense of OpenCV's python binding code.
http://dsnra.jpl.nasa.gov/software/Python/numpydoc/numpy-13.html
http://docs.scipy.org/doc/numpy/user/c-info.how-to-extend.html
Hope this helps.
Answer 1 (score: 8)
I hope this helps people looking for a fast and easy way.
Here is the github repo with the open-source C++ code I have written for exposing code that uses OpenCV's Mat class to python with as little pain as possible.
[UPDATE] This code now works with both OpenCV 2.X and OpenCV 3.X. CMake and experimental support for Python 3.X are also available now.
Answer 2 (score: 0)
One option is to implement the code directly in modules/python/src2/cv2.cpp as a custom fork of the python bindings.
OpenCV's build system bundles it into the single "cv2" module. An example of a contributed module is here: https://github.com/opencv/opencv/issues/8872#issuecomment-307136942