我试图与Python 3和joblib并行计算图像对的单应性。我们在左图像中有对象的边界框,并希望在右图中找到相应的边界框。但是,使用下面显示的方法,似乎只使用了我的一个机器核心。
#THIS CORRESPONDS TO ONE LOOP ITERATION.
def processOneImage(params):
    """Process one left/right image pair: load detections, compute the
    homography, and write the transferred bounding boxes.

    params: tuple of (leftFname, leftImgDir, rightImgDir,
                      leftDetectionDir, rightDetectionDir).
            leftFname is assumed to carry a 3-letter extension
            (``[:-4]`` strips it) — TODO confirm for this dataset.
    Returns None both on early-out (no detections / no homography) and
    on success; callers only use it for its file-writing side effect.
    """
    leftFname, leftImgDir, rightImgDir, leftDetectionDir, rightDetectionDir = params
    #Load detections. Early out if none.
    detFname = leftDetectionDir + leftFname[:-4] + '.txt'
    # 'with' closes the file automatically; the original's explicit
    # f.close() inside the with-block was redundant.
    with open(detFname, 'r') as f:
        objectDetections = [line.split() for line in f]
    if not objectDetections:
        return None
    #Load images.
    leftImg = cv2.imread(leftImgDir + leftFname)
    rightImg = cv2.imread(rightImgDir + leftFname)
    #Compute homography.
    H = computeHomography(leftImg, rightImg)
    if H is None:
        return None
    #Compute Bounding boxes and write out.  The context manager guarantees
    #the output file is flushed and closed even if
    #processLabelBoundingBoxes raises (the original leaked the handle then).
    outFname = rightDetectionDir + leftFname[:-4] + '.txt'
    with open(outFname, 'w') as outFile:
        processLabelBoundingBoxes(leftImg, rightImg, H, objectDetections, outFile)
#HERE WE RUN THE "LOOP" IN PARALLEL
def processDataset(leftImgFnames, leftImgDir, rightImgDir, leftDetectionDir, rightDetectionDir):
    """Run processOneImage over every left-image filename in parallel.

    Returns the list of per-image results from joblib.Parallel (one entry
    per input; each is None — see processOneImage).  The original computed
    this into a local and dropped it, so callers could never inspect it.
    """
    numItems = len(leftImgFnames)
    params = zip(leftImgFnames, [leftImgDir]*numItems, [rightImgDir]*numItems, [leftDetectionDir]*numItems, [rightDetectionDir]*numItems)
    # delayed(f)(p) is the documented joblib idiom; the "multiprocessing"
    # backend uses separate processes, so the GIL / OpenCV's internal
    # threading should not serialize the workers.
    return joblib.Parallel(n_jobs=4, backend="multiprocessing")(
        joblib.delayed(processOneImage)(p) for p in params)
是否可以以并行方式使用OpenCV?如果是这样,我是否必须以某种方式指定OpenCV运行哪个线程?
编辑:我也尝试过python多处理库中的Pools,但结果是一样的。
def processDataset(leftImgFnames, leftImgDir, rightImgDir, leftDetectionDir, rightDetectionDir):
    """multiprocessing.Pool variant of the parallel driver.

    Returns the list produced by Pool.map (one entry per input filename).
    Fixes two defects in the original: the pool was never closed/joined
    (leaking the 4 worker processes), and the map results were discarded.
    """
    numItems = len(leftImgFnames)
    params = zip(leftImgFnames, [leftImgDir]*numItems, [rightImgDir]*numItems, [leftDetectionDir]*numItems, [rightDetectionDir]*numItems)
    # The context manager terminates the workers on exit, even on error.
    with multiprocessing.Pool(4) as p:  # 4 cores/threads on this machine.
        return p.map(processOneImage, params)
为完整起见,这里有辅助函数
#THIS USES OPENCV
def computeHomography(leftImg, rightImg):
    """Estimate the left->right homography from ORB feature matches.

    Returns the 3x3 homography matrix (or None from findHomography on
    failure), or None when there are too few matches or either image
    yields no descriptors.
    """
    #Compute ORB keypoints and descriptors for both images.
    orb = cv2.ORB_create()
    keypointsLeft, descriptorsLeft = orb.detectAndCompute(leftImg, None)
    keypointsRight, descriptorsRight = orb.detectAndCompute(rightImg, None)
    # detectAndCompute returns None descriptors when no keypoints are
    # found (e.g. a blank or unreadable image); BFMatcher.match would
    # raise on None input, so bail out early.
    if descriptorsLeft is None or descriptorsRight is None:
        return None
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(descriptorsLeft, descriptorsRight)
    # NOTE(review): the original "verify match quality" loop kept every
    # match unchanged; preserved as a plain copy.  A distance threshold
    # (e.g. sort by m.distance) would be the place to add real filtering.
    goodMatches = list(matches)
    if len(goodMatches) > 10:
        src_pts = np.float32([keypointsLeft[m.queryIdx].pt for m in goodMatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypointsRight[m.trainIdx].pt for m in goodMatches]).reshape(-1, 1, 2)
        # RANSAC with a 5-pixel reprojection threshold.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        return M
    return None
#THIS USES OPENCV
def processLabelBoundingBoxes(leftImg, rightImg, H, objectDetections, outFile):
    """Warp each left-image detection box into the right image via H and
    write the resulting axis-aligned box to outFile.

    objectDetections: rows of strings [objID, x1, y1, x2, y2, conf]
                      (corner coordinates — widths/heights are derived).
    Output line format: "objID xmin ymin width height conf".
    """
    imgH, imgW = leftImg.shape[:2]
    for detection in objectDetections:
        objID = detection[0]
        objConf = float(detection[5])
        x1, y1, x2, y2 = (int(v) for v in detection[1:5])
        #Replace this with the code below for graphcuts. This is bounding box.
        # Fill the (image-clipped) box with 255 in one slice assignment
        # instead of the original per-pixel Python double loop.
        maskLeft = np.zeros((imgH, imgW), np.uint8)
        xs, ys = max(x1, 0), max(y1, 0)
        xe, ye = min(x2, imgW), min(y2, imgH)
        if xs < xe and ys < ye:
            maskLeft[ys:ye, xs:xe] = 255
        # Warp the mask into the right view.  NOTE(review): dsize uses the
        # LEFT image's dimensions, as the original did — assumes left and
        # right images are the same size; confirm for this dataset.
        maskRight = cv2.warpPerspective(maskLeft, H, (imgW, imgH))
        # Bounding box of the nonzero pixels, vectorized — replaces the
        # original O(W*H) scan.
        nz_y, nz_x = np.nonzero(maskRight)
        if nz_y.size == 0:
            # The warped box fell entirely outside the right image.  The
            # original emitted a degenerate negative-size box here; skip it.
            continue
        xmin, xmax = int(nz_x.min()), int(nz_x.max())
        ymin, ymax = int(nz_y.min()), int(nz_y.max())
        outFile.write("%s %d %d %d %d %f\n" % (objID, xmin, ymin, xmax - xmin, ymax - ymin, objConf))