在 OpenCV Scene Text Detection 中，我们有两个参数不同的 C++ 重载函数：
void cv::text::detectRegions ( InputArray image,
const Ptr< ERFilter > & er_filter1,
const Ptr< ERFilter > & er_filter2,
std::vector< std::vector< Point > > & regions
)
void cv::text::detectRegions ( InputArray image,
const Ptr< ERFilter > & er_filter1,
const Ptr< ERFilter > & er_filter2,
std::vector< Rect > & groups_rects,
int method = ERGROUPING_ORIENTATION_HORIZ,
const String & filename = String(),
float minProbability = (float) 0.5
)
但它对应的 Python 函数却只有一个：
regions= cv.text.detectRegions(image, er_filter1, er_filter2)
这个 Python 绑定是如何决定调用哪个 C++ 函数的？
答案 0（得分：1）
绑定代码是由脚本自动生成的，因此您需要自己查看 OpenCV 中生成的 Python 绑定代码，才能了解实际调用的是哪个函数。参见：https://docs.opencv.org/3.4/da/d49/tutorial_py_bindings_basics.html
答案 1（得分：0）
以下是 OpenCV 在我的机器上为 detectRegions 函数生成的绑定代码。对应的 Python 函数签名是：cv2.text.detectRegions(image, er_filter1, er_filter2[, method[, filename[, minProbability]]]) -> groups_rects。
如果仔细查看这段代码，您会发现：根据您提供的参数的数量和类型，会分别调用 cv::text::detectRegions 的两个不同版本。
// Auto-generated Python wrapper dispatching to the two C++ overloads of
// cv::text::detectRegions. Overload resolution is trial-and-error: each
// block below tries to parse the Python arguments with
// PyArg_ParseTupleAndKeywords (plus pyopencv_to conversions); on failure
// the pending Python error is cleared and the next candidate is tried.
//
// NOTE(review): both format strings ("OOO" and "OOO|iOf") accept a plain
// 3-argument call, so calling detectRegions(image, er_filter1, er_filter2)
// from Python always matches the FIRST candidate and dispatches to the
// `regions` (vector<vector<Point>>) overload — this answers the question.
static PyObject* pyopencv_cv_text_detectRegions(PyObject* , PyObject* args, PyObject* kw)
{
using namespace cv::text;
// Candidate 1: exactly 3 arguments, image convertible to cv::Mat
//   -> detectRegions(image, er_filter1, er_filter2, regions)
{
PyObject* pyobj_image = NULL;
Mat image;
PyObject* pyobj_er_filter1 = NULL;
Ptr<ERFilter> er_filter1;
PyObject* pyobj_er_filter2 = NULL;
Ptr<ERFilter> er_filter2;
vector_vector_Point regions;
const char* keywords[] = { "image", "er_filter1", "er_filter2", NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "OOO:detectRegions", (char**)keywords, &pyobj_image, &pyobj_er_filter1, &pyobj_er_filter2) &&
pyopencv_to(pyobj_image, image, ArgInfo("image", 0)) &&
pyopencv_to(pyobj_er_filter1, er_filter1, ArgInfo("er_filter1", 0)) &&
pyopencv_to(pyobj_er_filter2, er_filter2, ArgInfo("er_filter2", 0)) )
{
ERRWRAP2(cv::text::detectRegions(image, er_filter1, er_filter2, regions));
return pyopencv_from(regions);
}
}
// Parse/conversion failed: discard the TypeError and try the next signature.
PyErr_Clear();
// Candidate 2: same 3-argument overload, but image as cv::UMat
// (transparent OpenCL path); only the Mat/UMat conversion differs.
{
PyObject* pyobj_image = NULL;
UMat image;
PyObject* pyobj_er_filter1 = NULL;
Ptr<ERFilter> er_filter1;
PyObject* pyobj_er_filter2 = NULL;
Ptr<ERFilter> er_filter2;
vector_vector_Point regions;
const char* keywords[] = { "image", "er_filter1", "er_filter2", NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "OOO:detectRegions", (char**)keywords, &pyobj_image, &pyobj_er_filter1, &pyobj_er_filter2) &&
pyopencv_to(pyobj_image, image, ArgInfo("image", 0)) &&
pyopencv_to(pyobj_er_filter1, er_filter1, ArgInfo("er_filter1", 0)) &&
pyopencv_to(pyobj_er_filter2, er_filter2, ArgInfo("er_filter2", 0)) )
{
ERRWRAP2(cv::text::detectRegions(image, er_filter1, er_filter2, regions));
return pyopencv_from(regions);
}
}
PyErr_Clear();
// Candidate 3: 3 mandatory + up to 3 optional args ("|iOf"), image as cv::Mat
//   -> detectRegions(image, er_filter1, er_filter2, groups_rects, method,
//                    filename, minProbability)
// Reached only if at least one extra argument (or an unconvertible image)
// made the first two candidates fail.
{
PyObject* pyobj_image = NULL;
Mat image;
PyObject* pyobj_er_filter1 = NULL;
Ptr<ERFilter> er_filter1;
PyObject* pyobj_er_filter2 = NULL;
Ptr<ERFilter> er_filter2;
vector_Rect groups_rects;
int method=ERGROUPING_ORIENTATION_HORIZ;
PyObject* pyobj_filename = NULL;
String filename;
float minProbability=(float)0.5;
const char* keywords[] = { "image", "er_filter1", "er_filter2", "method", "filename", "minProbability", NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "OOO|iOf:detectRegions", (char**)keywords, &pyobj_image, &pyobj_er_filter1, &pyobj_er_filter2, &method, &pyobj_filename, &minProbability) &&
pyopencv_to(pyobj_image, image, ArgInfo("image", 0)) &&
pyopencv_to(pyobj_er_filter1, er_filter1, ArgInfo("er_filter1", 0)) &&
pyopencv_to(pyobj_er_filter2, er_filter2, ArgInfo("er_filter2", 0)) &&
pyopencv_to(pyobj_filename, filename, ArgInfo("filename", 0)) )
{
ERRWRAP2(cv::text::detectRegions(image, er_filter1, er_filter2, groups_rects, method, filename, minProbability));
return pyopencv_from(groups_rects);
}
}
PyErr_Clear();
// Candidate 4: same extended overload, image as cv::UMat.
{
PyObject* pyobj_image = NULL;
UMat image;
PyObject* pyobj_er_filter1 = NULL;
Ptr<ERFilter> er_filter1;
PyObject* pyobj_er_filter2 = NULL;
Ptr<ERFilter> er_filter2;
vector_Rect groups_rects;
int method=ERGROUPING_ORIENTATION_HORIZ;
PyObject* pyobj_filename = NULL;
String filename;
float minProbability=(float)0.5;
const char* keywords[] = { "image", "er_filter1", "er_filter2", "method", "filename", "minProbability", NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "OOO|iOf:detectRegions", (char**)keywords, &pyobj_image, &pyobj_er_filter1, &pyobj_er_filter2, &method, &pyobj_filename, &minProbability) &&
pyopencv_to(pyobj_image, image, ArgInfo("image", 0)) &&
pyopencv_to(pyobj_er_filter1, er_filter1, ArgInfo("er_filter1", 0)) &&
pyopencv_to(pyobj_er_filter2, er_filter2, ArgInfo("er_filter2", 0)) &&
pyopencv_to(pyobj_filename, filename, ArgInfo("filename", 0)) )
{
ERRWRAP2(cv::text::detectRegions(image, er_filter1, er_filter2, groups_rects, method, filename, minProbability));
return pyopencv_from(groups_rects);
}
}
// All candidates failed: return NULL with the Python error from the last
// parse attempt still set, so the caller sees a TypeError.
return NULL;
}