I am using a Windows 8.1 Core i4 machine with Python 3.7 and OpenCV 4.1. I want to detect the black objects in each frame and mark them with green squares, with the darkest object marked by a red square. I wrote code that detects the black spots in an image, finds the darkest one, and labels it as the target. When I run the code sequentially it works fine, but I need to reduce the running time by using multiprocessing.Pool(). Here is my original code without multiprocessing (I added a comment to each block describing what it does):
import cv2
from operator import itemgetter


class Detection:
    def __init__(self):
        self.image = []
        self.image_filter_hvs = []

    def first_filter(self):
        # Blur the image to remove noise
        image_filter_gaussian = cv2.GaussianBlur(self.image.copy(), (3, 3), 0)
        # Convert the image from BGR to HSV
        self.image_filter_hvs = cv2.cvtColor(image_filter_gaussian, cv2.COLOR_BGR2HSV)

    def filters(self, lower, higher):
        # Set pixels inside the color range to white, everything else to black (binary bitmap)
        image_binary_one_color = cv2.inRange(self.image_filter_hvs, lower, higher)
        # Dilate the image to make the white blobs larger
        binary_image = cv2.dilate(image_binary_one_color, None, iterations=2)
        return binary_image

    def find_contours(self, binary_image):
        contours = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
        target_possible_position = []
        # Discard some contours to speed up the program
        if len(contours) > 0:
            for ContFrame in contours:  # contours in a frame
                area = cv2.contourArea(ContFrame)
                if 200 > area > 15:
                    xx, yy, ww, hh = cv2.boundingRect(ContFrame)
                    # Center of each contour
                    center = (int(xx + ww / 2), int(yy + hh / 2))
                    # temp = red-channel value of the central pixel (BGR image)
                    temp = self.image[center[1]][center[0]][2]
                    # Entry layout: [color, [xx, yy, ww, hh]]
                    target_possible_position.append([temp, [xx, yy, ww, hh]])
        return target_possible_position

    def find_best_contours(self, target_position):
        if len(target_position) > 0:
            first_item = itemgetter(0)
            # Sort by the central-pixel value to drop the lighter contours
            target_position = sorted(target_position, key=first_item)
            # Keep only the darkest quarter of the contours to reduce the running time
            for i in range(round(len(target_position) / 4)):
                temp1 = target_position[i]
                xx = temp1[1][0]
                yy = temp1[1][1]
                ww = temp1[1][2]
                hh = temp1[1][3]
                # Draw a green rectangle around each remaining contour
                cv2.rectangle(self.image, (xx, yy), (xx + ww, yy + hh), (0, 255, 0), 2)
            # Draw a red rectangle around the darkest contour (by central pixel)
            # target_position[0][1] = [xx, yy, ww, hh]
            cv2.rectangle(self.image, (target_position[0][1][0], target_position[0][1][1]),
                          (target_position[0][1][0] + target_position[0][1][2],
                           target_position[0][1][1] + target_position[0][1][3]),
                          (0, 0, 255), 2)
            # Return the estimated values used by the tracker
            return \
                target_position[0][1][0], \
                target_position[0][1][1], \
                target_position[0][1][2], \
                target_position[0][1][3]


# Main
target_estimated_location = []
cam = cv2.VideoCapture("data2.avi")
ob = Detection()
while True:
    ret_val, frame = cam.read()
    if not ret_val:
        print("can not open the video")
        break
    # Detection
    ob.image = frame
    ob.first_filter()
    binary_image_1 = ob.filters((0, 0, 0), (255, 100, 130))
    target_position = ob.find_contours(binary_image_1)
    target_locations = ob.find_best_contours(target_position)
    Output_image = ob.image
    cv2.imshow('webcam', Output_image)
    cv2.waitKey(20)
cam.release()
I want to reduce the running time, so I decided to use multiprocessing and replace the for loop with multiprocessing.Pool so that all of the CPU cores are used.
import cv2
from multiprocessing import Pool
from operator import itemgetter


class Detection:
    def __init__(self):
        self.image = []
        self.image_filter_hvs = []

    def first_filter(self):  # this part filters the frame
        # Blur the image to remove noise
        image_filter_gaussian = cv2.GaussianBlur(self.image.copy(), (3, 3), 0)
        # Convert the image from BGR to HSV
        self.image_filter_hvs = cv2.cvtColor(image_filter_gaussian, cv2.COLOR_BGR2HSV)

    def filters(self, lower, higher):  # threshold away the unwanted colors
        # Set pixels inside the color range to white, everything else to black (binary bitmap)
        image_binary_one_color = cv2.inRange(self.image_filter_hvs, lower, higher)
        # Dilate the image to make the white blobs larger
        binary_image = cv2.dilate(image_binary_one_color, None, iterations=2)
        return binary_image

    def find_contours(self, binary_image):  # find the contours in the frame
        contours = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
        return contours

    def Green_contours(self, ContFrame):
        # Discard some contours to speed up the program
        if len(ContFrame) > 0:
            area = cv2.contourArea(ContFrame)
            if 200 > area > 15:
                xx, yy, ww, hh = cv2.boundingRect(ContFrame)
                # Center of the contour
                center = (int(xx + ww / 2), int(yy + hh / 2))
                # temp = red-channel value of the central pixel (BGR image)
                temp = self.image[center[1]][center[0]][2]
                # Entry layout: [color, [xx, yy, ww, hh]]
                target_possible_position = [temp, [xx, yy, ww, hh]]
                return target_possible_position

    def find_best_contours(self, target_position):
        if len(target_position) > 0:
            first_item = itemgetter(0)
            # Sort by the central-pixel value to drop the lighter contours
            target_position = sorted(target_position, key=first_item)
            for i in range(round(len(target_position) / 4)):
                temp1 = target_position[i]
                xx = temp1[1][0]
                yy = temp1[1][1]
                ww = temp1[1][2]  # width of the target
                hh = temp1[1][3]  # height of the target
                # Draw a green rectangle around each remaining contour
                cv2.rectangle(self.image, (xx, yy), (xx + ww, yy + hh), (0, 255, 0), 2)
            # Draw a red rectangle around the darkest contour (by central pixel)
            # target_position[0][1] = [xx, yy, ww, hh]
            cv2.rectangle(self.image, (target_position[0][1][0], target_position[0][1][1]),
                          (target_position[0][1][0] + target_position[0][1][2],
                           target_position[0][1][1] + target_position[0][1][3]),
                          (0, 0, 255), 2)
            # Return the estimated values used by the tracker
            return \
                target_position[0][1][0], \
                target_position[0][1][1], \
                target_position[0][1][2], \
                target_position[0][1][3]


if __name__ == "__main__":
    # Main
    cam = cv2.VideoCapture("data1.mp4")
    ob = Detection()
    while True:
        ret_val, frame = cam.read()
        if not ret_val:
            print("can not open the video")
            break
        # Detection
        ob.image = frame
        ob.first_filter()
        binary_image_1 = ob.filters((0, 0, 0), (255, 100, 130))
        conts = ob.find_contours(binary_image_1)
        p = Pool(processes=3)
        green_cont = p.map(ob.Green_contours, conts)
        # Remove the None entries (contours that were filtered out)
        green_cont = [x for x in green_cont if x is not None]
        target_position = green_cont
        target_locations = ob.find_best_contours(target_position)
        # Get the annotated image
        Output_image = ob.image
        # Show the image window
        cv2.imshow('webcam', Output_image)
        cv2.waitKey(1)
    cam.release()
In this code I used multiprocessing.Pool() instead of the for loop, so I expected it to be more efficient. Instead it is far too slow: the output window only updates about once every 5 minutes.
Thank you.
Answer (score: 1):
There is a known cv2 issue that breaks multiprocessing on OSX. The workaround is to configure new processes to be created with "spawn" instead of "fork":
multiprocessing.set_start_method('spawn')
Add the line above before creating the multiprocessing pool.
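A minimal sketch of where that call would sit in the second script from the question is shown below; it assumes the Detection class defined above is available in the same module, and it only illustrates the placement of set_start_method() (called once, inside the __main__ guard, before the first Pool is created), otherwise keeping the question's per-frame pool.
# Sketch only: placement of the suggested workaround in the question's script.
# Assumes the Detection class defined in the question is available in this module.
import multiprocessing

import cv2

if __name__ == "__main__":
    # Must run once, in the main process, before any Pool is created
    multiprocessing.set_start_method('spawn')

    cam = cv2.VideoCapture("data1.mp4")
    ob = Detection()
    while True:
        ret_val, frame = cam.read()
        if not ret_val:
            break
        ob.image = frame
        ob.first_filter()
        conts = ob.find_contours(ob.filters((0, 0, 0), (255, 100, 130)))
        # Same per-frame pool as in the question, closed here via the context manager
        with multiprocessing.Pool(processes=3) as p:
            green_cont = p.map(ob.Green_contours, conts)
        green_cont = [x for x in green_cont if x is not None]
        ob.find_best_contours(green_cont)
        cv2.imshow('webcam', ob.image)
        cv2.waitKey(1)
    cam.release()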