立体视觉(OpenCV) - 如何改善立体校正(stereo rectification)效果?

时间:2017-03-23 12:39:25

标签: python opencv computer-vision camera-calibration stereo-3d

我目前正在开展一个本科项目,我需要在立体图像中求取深度,因此获得一个好的视差图至关重要。我使用传统的棋盘格方法进行校准,并在 20 到 90 个视图之间交替尝试,我相信这已经足够了。尽管如此,立体校正的效果并不理想。我使用的是 OpenCV 和 Python。

这些是用于校准的20个棋盘视图的校正结果。

Rectification Results

下面是我的立体校正和相机校准代码,以及主程序。

立体相机校准

import cv,time,sys
from itertools import izip

n_boards=0  
board_w=int(sys.argv[1])    
board_h=int(sys.argv[2])    
n_boards=int(sys.argv[3])   
board_n=board_w*board_h     
board_sz=(board_w,board_h)  

image_points0=cv.CreateMat(n_boards*board_n,2,cv.CV_32FC1)
object_points=cv.CreateMat(n_boards*board_n,3,cv.CV_32FC1)
point_counts=cv.CreateMat(n_boards,1,cv.CV_32SC1)
intrinsic_matrix0=cv.CreateMat(3,3,cv.CV_32FC1)
distortion_coefficient0=cv.CreateMat(5,1,cv.CV_32FC1)
image_points1=cv.CreateMat(n_boards*board_n,2,cv.CV_32FC1)
intrinsic_matrix1=cv.CreateMat(3,3,cv.CV_32FC1)
distortion_coefficient1=cv.CreateMat(5,1,cv.CV_32FC1)

R=cv.CreateMat(3,3,cv.CV_64F)
T=cv.CreateMat(3,1,cv.CV_64F)
E=cv.CreateMat(3,3,cv.CV_64F)
F=cv.CreateMat(3,3,cv.CV_64F)
#term_crit=(cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER,30,0.1)
#capture frames of specified properties and modification of matrix values
i=0
z=0 
successes=0

found=0
with open('Left.txt','rb') as f:
    imgl= [line.strip() for line in f]
    with open('Right.txt','rb') as f1:
         imgr=[line.strip() for line in f1]
         for (image0,image1) in izip(imgl,imgr):
        if image0 =='' or image1== '' :
            break;
        else:

                Image0 = cv.LoadImage(image0)
                Image1 = cv.LoadImage(image1)

            gray_image0=cv.CreateImage(cv.GetSize(Image0),8,1)
            cv.CvtColor(Image0,gray_image0,cv.CV_BGR2GRAY)
            gray_image1=cv.CreateImage(cv.GetSize(Image1),8,1)
            cv.CvtColor(Image1,gray_image1,cv.CV_BGR2GRAY)
            (found0,corners0)=cv.FindChessboardCorners(gray_image0,board_sz,cv.CV_CALIB_CB_ADAPTIVE_THRESH| cv.CV_CALIB_CB_FILTER_QUADS)
            (found1,corners1)=cv.FindChessboardCorners(gray_image1,board_sz,cv.CV_CALIB_CB_ADAPTIVE_THRESH| cv.CV_CALIB_CB_FILTER_QUADS)
            cv.FindCornerSubPix(gray_image0,corners0,(11,11),(-1,-1),(cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER,30,0.001))
            cv.FindCornerSubPix(gray_image1,corners1,(11,11),(-1,-1),(cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER,30,0.001))     
            if found0==1 and found1==1:
                print "found frame number {0}".format(z+1)
                cv.DrawChessboardCorners(Image0,board_sz,corners0,1) 
                cv.DrawChessboardCorners(Image1,board_sz,corners1,1) 
                cv.ShowImage("Left cam corners",Image0);
                cv.ShowImage("Right cam corners",Image1);
                cv.WaitKey(33)
                corner_count0=len(corners0)
                corner_count1=len(corners1)
                print "corner count0",corner_count0
                print "corner count1",corner_count1
                z=z+1

            if len(corners0)==board_n and len(corners1)==board_n:
                step=successes*board_n
                k=step
                for j in range(board_n):
                    cv.Set2D(image_points0,k,0,corners0[j][0])
                    cv.Set2D(image_points0,k,1,corners0[j][1])

                    cv.Set2D(image_points1,k,0,corners1[j][0])
                    cv.Set2D(image_points1,k,1,corners1[j][1])

                    cv.Set2D(object_points,k,0,float(j)/float(board_w))
                    cv.Set2D(object_points,k,1,float(j)%float(board_w))
                    cv.Set2D(object_points,k,2,0.0)

                    k=k+1
                    cv.Set2D(point_counts,successes,0,board_n)
            successes=successes+1
            print successes
            time.sleep(2)
            print "-------------------------------------------------"
            print "\n"
print "Calibration OK, Matrices Created"
cv.DestroyWindow("Test Frame")

# now assigning new matrices according to view_count
object_points2=cv.CreateMat(successes*board_n,3,cv.CV_32FC1)
image_points20=cv.CreateMat(successes*board_n,2,cv.CV_32FC1)
image_points21=cv.CreateMat(successes*board_n,2,cv.CV_32FC1)
point_counts2=cv.CreateMat(successes,1,cv.CV_32SC1)

for i in range(successes*board_n):
    cv.Set2D(image_points20,i,0,cv.Get2D(image_points0,i,0))
    cv.Set2D(image_points20,i,1,cv.Get2D(image_points0,i,1))

    cv.Set2D(image_points21,i,0,cv.Get2D(image_points1,i,0))
    cv.Set2D(image_points21,i,1,cv.Get2D(image_points1,i,1))

    cv.Set2D(object_points2,i,0,cv.Get2D(object_points,i,0))
    cv.Set2D(object_points2,i,1,cv.Get2D(object_points,i,1))
    cv.Set2D(object_points2,i,2,cv.Get2D(object_points,i,2))


for i in range(successes):
    cv.Set2D(point_counts2,i,0,cv.Get2D(point_counts,i,0))

cv.Set2D(intrinsic_matrix0,0,0,1.0)
cv.Set2D(intrinsic_matrix0,1,1,1.0)

cv.Set2D(intrinsic_matrix1,0,0,1.0)
cv.Set2D(intrinsic_matrix1,1,1,1.0)

print "Checking Camera Calibration"
# camera calibration
cv.StereoCalibrate(object_points2,image_points20,image_points21,point_counts2,intrinsic_matrix0,
distortion_coefficient0,intrinsic_matrix1,distortion_coefficient1,cv.GetSize(Image0),R,T,E,F,
(cv.CV_TERMCRIT_ITER+cv.CV_TERMCRIT_EPS, 30, 1e-6), (cv.CV_CALIB_SAME_FOCAL_LENGTH+cv.CV_CALIB_ZERO_TANGENT_DIST+cv.CV_CALIB_FIX_FOCAL_LENGTH ))
print "Calibration Successful"  

# storing results in xml files
cv.Save("Image points1.xml",image_points20)
cv.Save("Image points2.xml",image_points21)
cv.Save("Intrinsics0.xml",intrinsic_matrix0)
cv.Save("Distortion0.xml",distortion_coefficient0)

cv.Save("Intrinsics1.xml",intrinsic_matrix1)
cv.Save("Distortion1.xml",distortion_coefficient1)
cv.Save("R.xml",R)
cv.Save("T.xml",T)

Stereo Rectification

import cv
import time
import numpy as np

def stereorectify(image0, image1):
    """Rectify a stereo pair using the saved calibration results.

    image0, image1 -- left/right frames (legacy CvMat/IplImage) at the same
    resolution as the calibration images.
    Returns the rectified (left, right) pair.
    Side effects: writes R1.xml, map1x.xml and Q.xml to the working directory.
    """
    # Load the matrices produced by the calibration script.
    intrinsic0 = cv.Load("Intrinsics0.xml")
    distortion0 = cv.Load("Distortion0.xml")
    intrinsic1 = cv.Load("Intrinsics1.xml")
    distortion1 = cv.Load("Distortion1.xml")
    R = cv.Load("R.xml")
    T = cv.Load("T.xml")

    size = cv.GetSize(image0)

    # Outputs of StereoRectify: per-camera rectification rotations, projection
    # matrices, and the disparity-to-depth reprojection matrix Q.
    R1 = cv.CreateMat(3, 3, cv.CV_64F)
    R2 = cv.CreateMat(3, 3, cv.CV_64F)
    P1 = cv.CreateMat(3, 4, cv.CV_64F)
    P2 = cv.CreateMat(3, 4, cv.CV_64F)
    Q = cv.CreateMat(4, 4, cv.CV_64F)

    # BUG FIX: the original passed the output rotations as (R2, R1), swapping
    # the left/right rectification transforms, hard-coded (320, 240) instead
    # of the actual frame size, and dropped the CV_CALIB_ZERO_DISPARITY flag
    # it had computed.
    cv.StereoRectify(intrinsic0, intrinsic1, distortion0, distortion1,
                     size, R, T, R1, R2, P1, P2, Q,
                     cv.CV_CALIB_ZERO_DISPARITY, 0)

    # One x/y map pair per camera (the original computed each map twice).
    map1x = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    map1y = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    map2x = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    map2y = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)

    cv.InitUndistortRectifyMap(intrinsic0, distortion0, R1, intrinsic0,
                               map1x, map1y)
    cv.InitUndistortRectifyMap(intrinsic1, distortion1, R2, intrinsic1,
                               map2x, map2y)

    # BUG FIX: remap into fresh destination arrays -- remapping an image onto
    # itself reads pixels that have already been overwritten.
    rect0 = cv.CreateMat(size[1], size[0], cv.GetElemType(image0))
    rect1 = cv.CreateMat(size[1], size[0], cv.GetElemType(image1))
    cv.Remap(image0, rect0, map1x, map1y, cv.CV_INTER_CUBIC)
    cv.Remap(image1, rect1, map2x, map2y, cv.CV_INTER_CUBIC)

    # Persist intermediates; Q is reused by the main program for reprojection.
    cv.Save("R1.xml", R1)
    cv.Save("map1x.xml", map1x)
    cv.Save("Q.xml", Q)
    return rect0, rect1

主程序

import cv2,cv
from Disparity import getDisparity
from dimensions import dimensions
from stereo_rectify_live import stereorectify
import numpy as np
print "begin"
# setup webcam
capture1 = cv2.VideoCapture(1);# Camera 1 capture
capture2 = cv2.VideoCapture(0);# Camera 2 capture
# set resolution
capture1.set(cv.CV_CAP_PROP_FRAME_WIDTH,320)
capture1.set(cv.CV_CAP_PROP_FRAME_HEIGHT,240)

capture2.set(cv.CV_CAP_PROP_FRAME_WIDTH,320)
capture2.set(cv.CV_CAP_PROP_FRAME_HEIGHT,240)

h,w=240,320
Q=cv.Load("Q.xml");
Q=np.array(Q,np.float32);
f=0.8*w
#Q = np.float32([[1, 0, 0, -0.5*w],
#               [0,-1, 0,  0.5*h],
#               [0, 0, 0,     -f], 
#               [0, 0, 1,      0]])
xyz=cv.CreateImage((240,320),cv.IPL_DEPTH_32F,3)
z=cv.CreateMat(240,320,cv.CV_32FC1)
fromto=[(2,0)]
print "begin"
i=0;npts=0;dist=0;k = 0;
while k<100:
    # Setting up queries
        ret,cam1=capture1.read()
        print ret
        ret,cam2=capture2.read()
        print ret
        cam1,cam2=stereorectify(cv.fromarray(cam1),cv.fromarray(cam2))
        cam1 = np.array(cam1);
        cam2 = np.array(cam2);
        cv2.imshow('First Camera',cam1);
        cv2.imshow('Second Camera',cam2);
        disparity = getDisparity(cam1, cam2, "BM")
        cv2.imshow("Disparity",np.array(disparity))
        first = cv2.inRange(np.array(disparity),np.array(220),np.array(255))
        xyz = cv2.reprojectImageTo3D(first,Q)
        cv.MixChannels([cv.fromarray(xyz)],[z],fromto)
        focal_length = Q[2,3];
        base_distance = 7.112; #in cms
        x,y,w1,h1=dimensions(cv2.cvtColor(np.array(disparity),cv2.COLOR_GRAY2BGR),1,170,240);
        temp = np.array(z[x:y+h,y:x+w]);
        t=temp[temp != -np.inf];
        disparity = np.mean(abs(t));
        print 'shape',np.shape(z[x:y+h,y:x+w]),np.shape(temp);
        Actualdist = focal_length * base_distance/disparity;
        print 'disparity',disparity
        print 'f', focal_length
        print "Actual Distance (in cms) from object =",Actualdist
        print k
        cv2.waitKey(10);
        k=k+1;


print "done!"

非常感谢对此事的任何帮助,因为我觉得我可以做些什么来改进我的结果。谢谢!

0 个答案:

没有答案