When I run this code:
import numpy as np
import cv2
from sklearn.datasets import fetch_mldata
from skimage.measure import label, regionprops
from sklearn.neighbors import KNeighborsClassifier


def train(data, target):
    # 1-nearest-neighbour classifier trained on the MNIST digits
    knn = KNeighborsClassifier(n_neighbors=1)
    knn.fit(data, target)
    return knn


def move(image, x, y):
    # shift the content x rows up and y columns left inside the 28x28 frame
    img = np.zeros((28, 28))
    img[:(28 - x), :(28 - y)] = image[x:, y:]
    return img


def fill(image):
    # zero-pad a region smaller than 28x28 up to the full 28x28 size
    if np.shape(image) != (28, 28):
        img = np.zeros((28, 28))
        x = 28 - np.shape(image)[0]
        y = 28 - np.shape(image)[1]
        img[:-x, :-y] = image
        return img
    else:
        return image


def my_rgb2gray(img_rgb):
    # grayscale as the average of the blue and red channels (OpenCV frames are BGR)
    img_gray = 0.5 * img_rgb[:, :, 0] + 0 * img_rgb[:, :, 1] + 0.5 * img_rgb[:, :, 2]
    img_gray = img_gray.astype('uint8')
    return img_gray


def my_rgb2gray2(img_rgb):
    # grayscale -> binary threshold -> morphological closing
    frame = img_rgb
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, frame_bw = cv2.threshold(grey, 170, 255, 0)
    frame_bw = cv2.morphologyEx(frame_bw, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
    return frame_bw


def count_images(framecal):
    # label connected components, filter them by position against the detected
    # line endpoints stored in `result`, and sum the kNN predictions per region
    regions = label(framecal)
    labels = regionprops(regions)
    images = []
    for i in range(0, len(labels)):
        if labels[i].centroid[0] < result[0] and labels[i].centroid[1] < result[1]:
            images.append(labels[i].image)
    count = 0
    for img in images:
        obrada = fill(np.array(img.astype('uint8')))
        count += model.predict(obrada.reshape(1, -1))
    return count


def check2(indices, i):
    # True if i already appears in indices
    check = False
    for el in indices:
        if el == i:
            check = True
            break
    return check


def findPoints(lines):
    # extreme endpoints over all detected line segments
    Xmin = 1000
    Ymin = 1000
    Ymax = 0
    Xmax = 0
    for i in range(len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            if x1 < Xmin:
                Xmin = x1
                Ymin = y1
            if x2 > Xmax:
                Ymax = y2
                Xmax = x2
    return Xmin, Ymin, Xmax, Ymax


def hough(frame, gray, min_line_len, max_line_gap):
    # detect line segments with the probabilistic Hough transform and
    # return the extreme endpoints of the detected line
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    cv2.imwrite('line.png', frame)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 40, min_line_len, max_line_gap)
    minx, miny, maxx, maxy = findPoints(lines)
    cv2.line(frame, (minx, miny), (maxx, maxy), (233, 0, 0), 2)
    return minx, miny, maxx, maxy


homepath = 'SoftVideoData/'
videopaths = ['video-0.avi',
              'video-1.avi',
              'video-2.avi',
              'video-3.avi',
              'video-4.avi',
              'video-5.avi',
              'video-6.avi',
              'video-7.avi',
              'video-8.avi',
              'video-9.avi']

# binarise MNIST and shift every digit to the top-left corner before training
mnist = fetch_mldata('MNIST original')
data = mnist.data > 0
data = data.astype('uint8')
target = mnist.target
fixed = np.empty_like(data)
for i in range(0, len(data)):
    l = label(data[i].reshape(28, 28))
    r = regionprops(l)
    min_x = r[0].bbox[0]
    min_y = r[0].bbox[1]
    for j in range(1, len(r)):
        if r[j].bbox[0] < min_x:
            min_x = r[j].bbox[0]
        if r[j].bbox[1] < min_y:
            min_y = r[j].bbox[1]
    img = move(data[i].reshape(28, 28), min_x, min_y)
    fixed[i] = img.reshape(784, )

model = train(fixed, target)

# process each video: detect the line on the first frame, then count digits
# frame by frame; the total grows only when the per-frame count increases
for index in range(0, 9):
    total = 0
    video = cv2.VideoCapture(homepath + videopaths[index])
    flag, frame = video.read()
    bw = my_rgb2gray(frame)
    result = hough(frame, bw, 10, 50)
    while 1:
        flag1, frame1 = video.read()
        last_count = total
        if flag1 is True:
            bwframe = my_rgb2gray2(frame1)
            curr_count = count_images(bwframe)
            if curr_count <= last_count:
                last_count = curr_count
            else:
                total += curr_count - last_count
                last_count = curr_count
            print total
            k = cv2.waitKey(15) & 0xff
            if k == 27:
                break
        else:
            break
    with open('out.txt', 'a') as file:
        file.write(homepath + videopaths[index] + '\t' + str(total))
I get this error:
Traceback (most recent call last):
  File "C:\Users\Joe\Desktop\SOFT-master7o\SoftProject.py", line 147, in <module>
    bw = my_rgb2gray(frame)
  File "C:\Users\Joe\Desktop\SOFT-master7o\SoftProject.py", line 35, in my_rgb2gray
    img_gray = 0.5*img_rgb[:, :, 0] + 0*img_rgb[:, :, 1] + 0.5*img_rgb[:, :, 2]
TypeError: 'NoneType' object has no attribute '__getitem__'
What is wrong? Thanks.
Answer 0 (score: 0)
When you call a name with square brackets, Python calls __getitem__ under the hood. So this means that img_rgb is not what you expect: it is not a numpy array, it is None.
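You can reproduce the error in isolation (Python 2, which matches the traceback in the question):

>>> frame = None
>>> frame[:, :, 0]
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: 'NoneType' object has no attribute '__getitem__'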
Check the part of the code where img_rgb is assigned.
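In your code img_rgb is the frame returned by video.read(), so a minimal sketch of that check might look like this (the IOError message is only illustrative, not part of your code):

video = cv2.VideoCapture(homepath + videopaths[index])
flag, frame = video.read()
if not flag or frame is None:
    # the read failed (wrong path, missing codec, empty file) - stop here
    raise IOError('could not read the first frame of ' + videopaths[index])
bw = my_rgb2gray(frame)  # safe: frame is a numpy array at this point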
Answer 1 (score: 0)
In reply to the comment:
Check your input before operating on it.
I would use the built-in VS debugger and set a breakpoint on this line:
bw = my_rgb2gray(frame)
and check whether frame is None before it enters that function.
If it is None, how do you handle it? It depends: either skip that video frame, or, if every frame is None, something is wrong and you need to check why flag, frame = video.read() produces a frame that is None. Sometimes the documentation will help you:
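For the "skip that video frame" option, a rough sketch based on the read loop from your question (the isOpened check is an addition for illustration):

video = cv2.VideoCapture(homepath + videopaths[index])
if not video.isOpened():
    print 'could not open ' + homepath + videopaths[index]  # most likely a wrong path
else:
    while 1:
        flag1, frame1 = video.read()
        if not flag1 or frame1 is None:
            break                       # end of stream or a failed read: stop this video
        bwframe = my_rgb2gray2(frame1)  # frame1 is a valid image here
        curr_count = count_images(bwframe)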