# For every key, compare each reference bounding box against the candidate
# boxes under the same key. A reference box counts as a hit (plus_precision)
# when at least one candidate overlaps it by >= half the reference box's own
# area; otherwise — provided there is at least one candidate — it counts as a
# miss (minus_precision). When a key has zero candidates, neither counter
# changes (this preserves the original behaviour).
for key in copy_reference_dataset:
    # NOTE(review): the original code took the candidate COUNT from
    # compared_to_dataset[key] but the candidate ROWS from
    # copy_compared_to_dataset[key]. Preserved as-is — confirm the two dicts
    # always agree in length for every key.
    candidate_count = len(compared_to_dataset[key])
    # Hoisted out of the per-reference loop: the candidate rectangles are the
    # same for every reference row of this key, so build them once per key
    # instead of once per (reference row, candidate row) pair. This removes
    # the dominant O(refs x candidates) object-construction cost.
    candidates = [
        Rectangle(int(copy_compared_to_dataset[key][i][0]),
                  int(copy_compared_to_dataset[key][i][1]),
                  int(copy_compared_to_dataset[key][i][2]),
                  int(copy_compared_to_dataset[key][i][3]))
        for i in range(candidate_count)
    ]
    for reference_row in copy_reference_dataset[key]:
        compt = compt + 1
        # Record every reference row under its key (single dict lookup
        # instead of the original `if key in ... else` double lookup).
        plus_min_dict.setdefault(key, []).append(reference_row)
        ra = Rectangle(int(reference_row[0]),
                       int(reference_row[1]),
                       int(reference_row[2]),
                       int(reference_row[3]))
        # Loop-invariant threshold: half of the reference box's own area.
        half_reference_area = area(ra, ra) / 2
        # any() short-circuits on the first sufficient overlap, exactly like
        # the original validation_bool early-exit while loop.
        if any(area(ra, rb) >= half_reference_area for rb in candidates):
            plus_precision = plus_precision + 1
        elif candidate_count > 0:
            minus_precision = minus_precision + 1
我的代码功能齐全,但速度相当慢:两个字典之间需要逐项迭代和比较,而每个字典的值可以超过 300000 项。我已经做了一些关于多处理(multiprocessing)的研究,并意识到进程池(Pool)可能是最好的选择;并行运行 4 个工作进程应该是一个不错的方案。任何关于如何实现我的第一次多处理改造的提示都将受到赞赏。