当我运行以下python代码时, 出现错误: error: (-215:Assertion failed) inputs.size() == requiredOutputs in function 'getMemoryShapes'
我不知道如何使用blobFromImage函数的参数
blob = cv2.dnn.blobFromImage(image,1,(64,64))
或如何知道inputs.size()
# Load the query image and remember its original spatial dimensions.
image = cv2.imread('aa.jpg')
(h, w) = image.shape[:2]
cv2.imshow('image', image)
cv2.waitKey(1)  # imshow never renders without a waitKey pump
print(h, w)

# The network was trained on 64x64 inputs scaled to [0, 1] (data / 255.0 in
# the training script), so inference must apply the SAME preprocessing.
# The original call used scalefactor=1, feeding raw 0-255 values — a
# train/serve mismatch.  blobFromImage performs the resize itself, so the
# separate cv2.resize(image, (64, 64)) call was redundant and is dropped.
# NOTE(review): the "(-215) inputs.size() == requiredOutputs" assertion is
# raised while building the graph's memory shapes — if it persists after
# fixing preprocessing, verify how `net` was exported/loaded (e.g. a Keras
# model must be frozen/converted before cv2.dnn can read it).
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0 / 255.0, size=(64, 64))

# Pass the blob through the network and obtain the predictions.
print("[INFO] computing object detections...")
net.setInput(blob)  # `net` is assumed to be loaded earlier — TODO confirm
detections = net.forward()
培训代码如下:
# (imports for cv2, numpy, paths, os, random, argparse appear earlier in the file)

# Initialize the list of images and their class labels.
print("[INFO] loading images...")
data = []
labels = []

# Grab the image paths and shuffle them deterministically so the
# train/test split is reproducible across runs.
imagePaths = sorted(list(paths.list_images(args["dataset"])))
print(*imagePaths)
random.seed(42)
random.shuffle(imagePaths)
print("----------------after shuffle-------------------")
print(*imagePaths)

# Loop over the input images.  NOTE: in the original paste the loop body
# had lost its indentation, which is a syntax error in Python — restored
# here so only the statements below actually run once per image.
for imagePath in imagePaths:
    # Load the image and resize it to 64x64 pixels (the required input
    # spatial dimensions of SmallVGGNet), then store it in the data list.
    image = cv2.imread(imagePath)
    image = cv2.resize(image, (64, 64))
    data.append(image)
    # The class label is the name of the image's parent directory,
    # i.e. dataset/<label>/<file>.
    label = imagePath.split(os.path.sep)[-2]
    labels.append(label)

# Scale the raw pixel intensities to the range [0, 1] — inference code
# must apply this same 1/255 scaling to its input blobs.
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# Partition the data: 75% for training, 25% for testing (seeded for
# reproducibility).
(trainX, testX, trainY, testY) = train_test_split(data,
labels, test_size=0.25, random_state=42)
# Convert the string labels to one-hot vectors.
# NOTE(review): with exactly 2 classes LabelBinarizer emits a single
# 0/1 column, not two one-hot columns — that shape will not match a
# 2-unit softmax head trained with categorical_crossentropy; confirm
# the dataset has >2 classes or switch to binary_crossentropy.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
# On-the-fly data augmentation applied during training.
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
horizontal_flip=True, fill_mode="nearest")
# Build the network for 64x64x3 inputs; the output layer size follows
# the number of distinct labels discovered by the binarizer.
model = SmallVGGNet.build(width=64, height=64, depth=3,
classes=len(lb.classes_))
# SGD with linear learning-rate decay over the training run
# (INIT_LR and EPOCHS are defined elsewhere in the file).
opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])