Specify an origin to the warpPerspective() function in OpenCV 2.x

Date: 2010-11-25 16:18:12

Tags: opencv

I am trying to specify an origin other than the default (0,0) for the warpPerspective() function, so that the transform can be applied independently of the size of the supporting image. I added a CvPoint parameter to the original code, but I cannot find where to use these coordinates. I tried using them in the computation of X0, Y0 and W0, but that doesn't work; it only shifts the transformed image inside the resulting image. Any idea?

Here is the code:

void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
            int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );

    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());

    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);

    int x, y, x1, y1, width = dst.cols, height = dst.rows;

    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

    for( y = 0; y < height; y += bh0 )
    {
        for( x = 0; x < width; x += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);

            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(x, y, bw, bh));

            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS);
                        xy[x1*2+1] = (short)(Y >> INTER_BITS);
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                                  (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }

            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}

3 Answers:

Answer 0 (score: 5)

OK, I found it myself! You have two things to do:

  • compute the destination dimensions in the source referential, and use them for the remap;
  • increment the computed point coordinates.

Here is the code transformed this way:

void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
        int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );

    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());

    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);

    int x, xDest, y, yDest, x1, y1, width = dst.cols, height = dst.rows;

    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

    for( y = -origin.y, yDest = 0; y < height; y += bh0, yDest += bh0 )
    {
        for( x = -origin.x, xDest = 0; x < width; x += bw0, xDest += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);
            // to avoid dimension errors
            if (bw <= 0 || bh <= 0)
                break;

            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(xDest, yDest, bw, bh));

            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS) + origin.x;
                        xy[x1*2+1] = (short)(Y >> INTER_BITS) + origin.y;
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                            (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }

            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}

together with this function:

CvPoint transformPoint(const CvPoint pointToTransform, const CvMat* matrix) {
    double coordinates[3] = {pointToTransform.x, pointToTransform.y, 1};
    CvMat originVector = cvMat(3, 1, CV_64F, coordinates);
    CvMat transformedVector = cvMat(3, 1, CV_64F, coordinates);
    cvMatMul(matrix, &originVector, &transformedVector);
    CvPoint outputPoint = cvPoint((int)(cvmGet(&transformedVector, 0, 0) / cvmGet(&transformedVector, 2, 0)),
                                  (int)(cvmGet(&transformedVector, 1, 0) / cvmGet(&transformedVector, 2, 0)));
    return outputPoint;
}
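
Usage note (added here, not part of the original answer): a common way to obtain dsize and origin for the modified function is to push the four source corners through the homography and take their bounding box, as the first bullet above suggests. Below is a minimal sketch of that idea in Python, using cv2.perspectiveTransform for brevity; the helper name warp_full and the pre-translation trick are illustrative assumptions, intended to be equivalent in spirit to passing origin, not a drop-in for the C++ code above.

import cv2
import numpy as np

def warp_full(src, M):
    """Warp src by homography M, sizing/shifting dst so nothing is clipped."""
    h, w = src.shape[:2]
    # Push the four source corners through the homography.
    corners = np.array([[[0, 0]], [[w, 0]], [[w, h]], [[0, h]]], dtype=np.float64)
    warped = cv2.perspectiveTransform(corners, M).reshape(-1, 2)

    # The bounding box of the warped corners gives the destination size;
    # its (possibly negative) top-left corner is the offset to compensate for.
    x_min, y_min = np.floor(warped.min(axis=0)).astype(int)
    x_max, y_max = np.ceil(warped.max(axis=0)).astype(int)
    dsize = (int(x_max - x_min), int(y_max - y_min))

    # Pre-translating the homography by (-x_min, -y_min) plays the same role
    # as passing origin = (-x_min, -y_min) to the modified warpPerspective().
    shift = np.array([[1, 0, -x_min],
                      [0, 1, -y_min],
                      [0, 0, 1]], dtype=np.float64)
    return cv2.warpPerspective(src, shift.dot(M), dsize)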

Answer 1 (score: 2)

A much simpler and cleaner solution is to modify the perspective transformation itself. You can do a translation that moves the origin to the desired position, then apply the perspective transformation, and finally apply the inverse translation.

Here is a small example program in Python that rotates an image by 45 degrees around the point (100, 100):

import cv2
import numpy as np


def translation_mat(dx, dy):
    return np.array([1, 0, dx, 0, 1, dy, 0, 0, 1]).reshape((3, 3))

def main():
    img = cv2.imread(r"pigeon.png", cv2.IMREAD_GRAYSCALE)

    # a simple rotation by 45 degrees
    rot = np.array([np.sin(np.pi/4), -np.cos(np.pi/4), 0,
                    np.cos(np.pi/4),  np.sin(np.pi/4), 0,
                    0, 0, 1]).reshape((3, 3))
    t1 = translation_mat(-100, -100)
    t2 = translation_mat(100, 100)
    rot_shifted = t2.dot(rot.dot(t1))
    size = (img.shape[1], img.shape[0])

    img1 = cv2.warpPerspective(img, rot, size)
    img2 = cv2.warpPerspective(img, rot_shifted, size)

    cv2.imshow("Original image", img)
    cv2.imshow("Rotated around (0,0)", img1)
    cv2.imshow("Rotated around(100, 100)", img2)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()

Note that the order of the transformations is read from right to left.

rot_shifted = t2.dot(rot.dot(t1))

t1 is applied first, then rot, then t2.
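
As a quick sanity check (a small sketch added here, not part of the original answer), the composed matrix should leave the pivot (100, 100) exactly where it is, which is easy to verify in homogeneous coordinates:

import numpy as np

# Pivot point (100, 100) in homogeneous coordinates.
p = np.array([100.0, 100.0, 1.0])

# Same matrices as in the example above.
rot = np.array([np.sin(np.pi/4), -np.cos(np.pi/4), 0,
                np.cos(np.pi/4),  np.sin(np.pi/4), 0,
                0, 0, 1]).reshape((3, 3))
t1 = np.array([1, 0, -100, 0, 1, -100, 0, 0, 1]).reshape((3, 3))
t2 = np.array([1, 0,  100, 0, 1,  100, 0, 0, 1]).reshape((3, 3))

rot_shifted = t2.dot(rot.dot(t1))
print(rot_shifted.dot(p))  # -> [100. 100.   1.]: the pivot maps onto itself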

Answer 2 (score: 0)

For those looking for this in Python, here is a start. I am not 100% sure it works, because I stripped some optimizations out of it. There is also an issue with linear interpolation; I simply did not use it, but you may want to take a closer look if you do.

import cv2
import numpy as np


def warp_perspective(src, M, (width, height), (origin_x, origin_y),
                     flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,
                     borderValue=0, dst=None):
    """
    Implementation in Python using base code from
    http://stackoverflow.com/questions/4279008/specify-an-origin-to-warpperspective-function-in-opencv-2-x

    Note there is an issue with linear interpolation.
    """
    B_SIZE = 32

    if dst is None:
        dst = np.zeros((height, width, 3), dtype=src.dtype)

    # Set interpolation mode.
    interpolation = flags & cv2.INTER_MAX
    if interpolation == cv2.INTER_AREA:
        raise Exception('Area interpolation is not supported!')

    # Prepare matrix.    
    M = M.astype(np.float64)
    if not(flags & cv2.WARP_INVERSE_MAP):
        M = cv2.invert(M)[1]
    M = M.flatten()

    x_dst = y_dst = 0
    for y in xrange(-origin_y, height, B_SIZE):
        for x in xrange(-origin_x, width, B_SIZE):

            print (x, y)

            # Block dimensions.
            bw = min(B_SIZE, width - x_dst)
            bh = min(B_SIZE, height - y_dst)

            # To avoid dimension errors.
            if bw <= 0 or bh <= 0:
                break

            # View of the destination array.
            dpart = dst[y_dst:y_dst+bh, x_dst:x_dst+bw]

            # Original code used a view of the array here, but we're using numpy arrays.
            XY = np.zeros((bh, bw, 2), dtype=np.int16)
            A = np.zeros((bh, bw), dtype=np.uint16)

            for y1 in xrange(bh):
                X0 = M[0]*x + M[1]*(y + y1) + M[2]
                Y0 = M[3]*x + M[4]*(y + y1) + M[5]
                W0 = M[6]*x + M[7]*(y + y1) + M[8]

                if interpolation == cv2.INTER_NEAREST:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1);
                        if W != 0:
                            W = np.float64(1.0)/W

                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16(X)
                        XY[y1, x1][1] = np.int16(Y)
                else:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1);
                        if W != 0:
                            W = cv2.INTER_TAB_SIZE/W

                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16((X >> cv2.INTER_BITS) + origin_x)
                        XY[y1, x1][1] = np.int16((Y >> cv2.INTER_BITS) + origin_y)
                        A[y1, x1] = np.int16(((Y & (cv2.INTER_TAB_SIZE-1))*cv2.INTER_TAB_SIZE + (X & (cv2.INTER_TAB_SIZE-1))))

            if interpolation == cv2.INTER_NEAREST:
                cv2.remap(src, XY, None, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)
            else:
                cv2.remap(src, XY, A, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)

            x_dst += B_SIZE
        x_dst = 0
        y_dst += B_SIZE

    return dst
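
A possible usage sketch (added for illustration; the file names, homography, output size, and origin below are made up, and it assumes the port above behaves as intended):

import cv2
import numpy as np

# Hypothetical example: warp with an arbitrary homography whose output would
# partly land at negative coordinates, and shift the origin so that part of
# the result is still visible in the destination image.
img = cv2.imread('input.png')              # made-up file name
M = np.array([[1.0, 0.2, -150.0],
              [0.1, 1.0, -100.0],
              [0.0, 0.0,    1.0]])

out = warp_perspective(img, M, (800, 600), (150, 100),
                       flags=cv2.INTER_NEAREST)  # origin is meant to shift where the warp lands
cv2.imwrite('warped.png', out)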