我需要在两组独立的特征之间找到匹配,这两组特征分别提取自两个不同相机拍摄的同一场景的两幅图像。我使用的是 HumanEva-I 数据集,因此我有相机的校准矩阵(在本例中为 BW1 和 BW2)。
我不能使用像简单相关,SIFT或SURF这样的方法来解决问题,因为相机相距很远并且也是旋转的。因此,图像之间的差异很大,也存在遮挡。
由于我已经拥有的校准信息,我一直专注于在基于地面实况点匹配的捕获图像之间找到Homography。 一旦我有这个单应性,我将使用完美匹配(匈牙利算法)来找到最佳对应关系。这里单应性的重要性在于我必须计算点之间的距离。
到目前为止一切似乎都很好,我的问题是我找不到好的单应性。我尝试过RANSAC方法,带有sampson距离的黄金标准方法(这是一种非线性优化方法),主要来自Richard Hartley撰写的一本名为“计算机视觉中的多视图几何”第二版的书。
到目前为止,我已经在matlab中实现了所有内容。
还有其他办法吗?我是在正确的道路上吗?如果是这样,我可能做错了什么?
答案 0(得分:1)
您可以尝试以下两种方法:
答案 1(得分:0)
另一种您可能会觉得有用的方法见 here(原文链接)。该方法尝试将局部模型拟合到一组点上;当存在多个不同的局部模型时,其全局优化策略使它优于 RANSAC。我相信他们也提供了可用的代码。
答案 2(得分:0)
您可能会对我的解决方案感兴趣:它是 Coherent Point Drift(相干点漂移)算法的纯 numpy 实现(implementation)。
以下是一个例子:
from functools import partial
from scipy.io import loadmat
import matplotlib.pyplot as plt
import numpy as np
import time
class RigidRegistration(object):
    """Rigid point-set registration via Coherent Point Drift (CPD).

    Iteratively estimates a scale ``s``, rotation ``R`` and translation
    ``t`` aligning the moving point set ``Y`` to the fixed point set
    ``X``, using an EM procedure over a Gaussian mixture centred on the
    moving points (Myronenko & Song, 2010).

    Parameters
    ----------
    X : (N, D) ndarray -- fixed (target) points.
    Y : (M, D) ndarray -- moving (source) points; replaced as fitting proceeds.
    R : (D, D) ndarray, optional -- initial rotation (default: identity).
    t : (1, D) ndarray, optional -- initial translation (default: zero).
    s : float, optional -- initial scale (default: 1).
    sigma2 : float, optional -- initial GMM variance; estimated from the
        data in :meth:`initialize` when ``None``.
    maxIterations : int -- maximum number of EM iterations.
    tolerance : float -- stop when the change in the objective drops below this.
    w : float in [0, 1) -- weight of the uniform outlier component (0 = off).
    """

    def __init__(self, X, Y, R=None, t=None, s=None, sigma2=None,
                 maxIterations=100, tolerance=0.001, w=0):
        if X.shape[1] != Y.shape[1]:
            # Bug fix: raising a bare string is a TypeError in Python 3;
            # raise a real exception with the same message instead.
            raise ValueError('Both point clouds must have the same number of dimensions!')
        self.X = X
        self.Y = Y
        (self.N, self.D) = self.X.shape
        (self.M, _) = self.Y.shape
        self.R = np.eye(self.D) if R is None else R
        self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
        self.s = 1 if s is None else s
        self.sigma2 = sigma2          # GMM variance; lazily estimated in initialize()
        self.iteration = 0
        self.maxIterations = maxIterations
        self.tolerance = tolerance
        self.w = w
        self.q = 0                    # current value of the (negative) objective
        self.err = 0                  # change in q between successive iterations

    def register(self, callback):
        """Run EM to convergence, invoking ``callback(X=..., Y=...)`` each
        iteration; returns ``(Y, s, R, t)``."""
        self.initialize()
        while self.iteration < self.maxIterations and self.err > self.tolerance:
            self.iterate()
            callback(X=self.X, Y=self.Y)
        return self.Y, self.s, self.R, self.t

    def iterate(self):
        """One EM step: soft correspondences (E), then transform update (M)."""
        self.EStep()
        self.MStep()
        self.iteration += 1

    def MStep(self):
        """Maximisation: re-estimate (s, R, t), move Y, update the variance."""
        self.updateTransform()
        self.transformPointCloud()
        self.updateVariance()

    def updateTransform(self):
        """Closed-form update of s, R, t from the soft correspondences P."""
        # P-weighted centroids of the two point sets.
        muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np)
        muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)
        self.XX = self.X - np.tile(muX, (self.N, 1))
        YY = self.Y - np.tile(muY, (self.M, 1))
        # Cross-covariance A = XX^T P YY; its SVD yields the rotation
        # (Kabsch/Procrustes solution).
        self.A = np.dot(np.dot(np.transpose(self.XX), np.transpose(self.P)), YY)
        U, _, V = np.linalg.svd(self.A, full_matrices=True)
        C = np.ones((self.D, ))
        C[self.D - 1] = np.linalg.det(np.dot(U, V))  # guard against reflections
        self.R = np.dot(np.dot(U, np.diag(C)), V)
        self.YPY = np.dot(np.transpose(self.P1), np.sum(np.multiply(YY, YY), axis=1))
        self.s = np.trace(np.dot(np.transpose(self.A), self.R)) / self.YPY
        self.t = np.transpose(muX) - self.s * np.dot(self.R, np.transpose(muY))

    def transformPointCloud(self, Y=None):
        """Apply the current (s, R, t); in place on ``self.Y`` when *Y* is None,
        otherwise returns the transformed copy of *Y*."""
        # Bug fix: the original tested `if not Y`, which raises ValueError
        # for any multi-element ndarray; compare against None explicitly.
        if Y is None:
            self.Y = self.s * np.dot(self.Y, np.transpose(self.R)) + np.tile(np.transpose(self.t), (self.M, 1))
            return
        # Bug fix: tile by the size of the supplied cloud, not self.M,
        # so clouds of any size can be transformed.
        return self.s * np.dot(Y, np.transpose(self.R)) + np.tile(np.transpose(self.t), (Y.shape[0], 1))

    def updateVariance(self):
        """Update sigma2 and the convergence measure ``err``."""
        qprev = self.q
        trAR = np.trace(np.dot(self.A, np.transpose(self.R)))
        xPx = np.dot(np.transpose(self.Pt1), np.sum(np.multiply(self.XX, self.XX), axis=1))
        self.q = (xPx - 2 * self.s * trAR + self.s * self.s * self.YPY) / (2 * self.sigma2) \
            + self.D * self.Np / 2 * np.log(self.sigma2)
        self.err = np.abs(self.q - qprev)
        self.sigma2 = (xPx - self.s * trAR) / (self.Np * self.D)
        if self.sigma2 <= 0:
            # Floor the variance so the Gaussians stay well-defined.
            self.sigma2 = self.tolerance / 10

    def initialize(self):
        """Apply the initial transform and estimate sigma2 when not supplied."""
        self.Y = self.s * np.dot(self.Y, np.transpose(self.R)) + np.repeat(self.t, self.M, axis=0)
        if self.sigma2 is None:
            # Default variance: mean squared distance over all X-Y pairs.
            XX = np.reshape(self.X, (1, self.N, self.D))
            YY = np.reshape(self.Y, (self.M, 1, self.D))
            diff = np.tile(XX, (self.M, 1, 1)) - np.tile(YY, (1, self.N, 1))
            self.sigma2 = np.sum(np.multiply(diff, diff)) / (self.D * self.M * self.N)
        self.err = self.tolerance + 1   # force at least one iteration
        self.q = -self.err - self.N * self.D / 2 * np.log(self.sigma2)

    def EStep(self):
        """Expectation: posteriors P[m, n] that moving point m generated x_n."""
        P = np.zeros((self.M, self.N))
        for i in range(self.M):
            diff = self.X - np.tile(self.Y[i, :], (self.N, 1))
            P[i, :] = np.sum(np.multiply(diff, diff), axis=1)
        # Uniform-noise term of the mixture (exactly zero when w == 0).
        c = (2 * np.pi * self.sigma2) ** (self.D / 2)
        c = c * self.w / (1 - self.w) * self.M / self.N
        P = np.exp(-P / (2 * self.sigma2))
        den = np.sum(P, axis=0)
        den = np.tile(den, (self.M, 1))
        den[den == 0] = np.finfo(float).eps
        # Bug fix: the original computed c but never used it, so the `w`
        # outlier weight had no effect; CPD normalises by (den + c).
        self.P = np.divide(P, den + c)
        self.Pt1 = np.sum(self.P, axis=0)
        self.P1 = np.sum(self.P, axis=1)
        self.Np = np.sum(self.P1)
def visualize(X, Y, ax):
    """Scatter-plot the fixed (red) and moving (blue) point sets on *ax*
    and refresh the interactive figure."""
    plt.cla()
    for points, colour in ((X, 'red'), (Y, 'blue')):
        ax.scatter(points[:, 0], points[:, 1], color=colour)
    plt.draw()
    plt.pause(0.001)
def main():
    """Demo: rigidly register the classic CPD 'fish' point sets, animating
    each EM iteration into a full-figure axes."""
    fish = loadmat('./data/fish.mat')
    fixed = fish['X']    # number-of-points x number-of-dimensions fixed points
    moving = fish['Y']   # number-of-points x number-of-dimensions moving points
    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    draw = partial(visualize, ax=fig.axes[0])
    RigidRegistration(fixed, moving).register(draw)
    plt.show()


if __name__ == '__main__':
    main()