I am trying to do handwriting classification by following this article: https://github.com/priya-dwivedi/Deep-Learning/blob/master/handwriting_recognition/English_Writer_Identification.ipynb.
When fitting the model, I get an error message saying that fit_generator does not expect any such argument!
Also, although the error itself is about an unexpected keyword argument, it is flagged as a TypeError, and I wonder whether there is a problem with my pipeline.
Here is the model. (I am excluding all the code after the error, since it is not relevant in any way. If you think it matters, you can refer to the code in the link above.)
TensorFlow version - 1.14, Keras version - 2.2.4
from __future__ import division
import numpy as np
import os
import glob
from PIL import Image
from random import *
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Lambda, ELU, Activation, BatchNormalization
from tensorflow.keras.layers import Convolution2D, Cropping2D, ZeroPadding2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
import tensorflow
import tensorflow.keras
# Create sentence writer mapping
#Dictionary with form and writer mapping
d = {}
with open('forms_for_parsing.txt') as f:
    for line in f:
        key = line.split(' ')[0]
        writer = line.split(' ')[1]
        d[key] = writer
tmp = []
target_list = []
path_to_files = os.path.join('datab', '*')
for filename in sorted(glob.glob(path_to_files)):
    tmp.append(filename)
    image_name = filename.split(os.sep)[1]
    file, ext = os.path.splitext(image_name)
    parts = file.split('-')
    form = parts[0] + '-' + parts[1]
    for key in d:
        if key == form:
            target_list.append(str(d[form]))
img_files = np.asarray(tmp)
img_targets = np.asarray(target_list)
# Visualizing the data
for filename in img_files[:3]:
    img = mpimg.imread(filename)
    plt.figure(figsize=(10, 10))
    plt.imshow(img, cmap='gray')
# Label Encode writer names for one hot encoding later
encoder = LabelEncoder()
encoder.fit(img_targets)
encoded_Y = encoder.transform(img_targets)
print(img_files[:5], img_targets[:5], encoded_Y[:5])
#split into test train and validation in ratio 4:1:1
from sklearn.model_selection import train_test_split
train_files, rem_files, train_targets, rem_targets = train_test_split(
    img_files, encoded_Y, train_size=0.66, random_state=52, shuffle=True)
validation_files, test_files, validation_targets, test_targets = train_test_split(
    rem_files, rem_targets, train_size=0.5, random_state=22, shuffle=True)
print(train_files.shape, validation_files.shape, test_files.shape)
print(train_targets.shape, validation_targets.shape, test_targets.shape)
# Generator function for generating random crops from each sentence
# Now create generators for randomly cropping 113x113 patches from these images
batch_size = 16
num_classes = 50
# Start with train generator shared in the class and add image augmentations
def generate_data(samples, target_files, batch_size=batch_size, factor=0.1):
    num_samples = len(samples)
    from sklearn.utils import shuffle
    while 1:  # Loop forever so the generator never terminates
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            batch_targets = target_files[offset:offset+batch_size]
            images = []
            targets = []
            for i in range(len(batch_samples)):
                batch_sample = batch_samples[i]
                batch_target = batch_targets[i]
                im = Image.open(batch_sample)
                cur_width = im.size[0]
                cur_height = im.size[1]
                # print(cur_width, cur_height)
                height_fac = 113 / cur_height
                new_width = int(cur_width * height_fac)
                size = new_width, 113
                imresize = im.resize(size, Image.ANTIALIAS)  # Resize so height = 113 while keeping aspect ratio
                now_width = imresize.size[0]
                now_height = imresize.size[1]
                # Generate crops of size 113x113 from this resized image and keep a random 10% of crops
                avail_x_points = list(range(0, now_width - 113))  # total x start points are from 0 to width - 113
                # Pick a random x%
                pick_num = int(len(avail_x_points) * factor)
                # Now pick
                random_startx = sample(avail_x_points, pick_num)
                for start in random_startx:
                    imcrop = imresize.crop((start, 0, start+113, 113))
                    images.append(np.asarray(imcrop))
                    targets.append(batch_target)
            X_train = np.array(images)
            y_train = np.array(targets)
            # Reshape X_train to (num_crops, 113, 113, 1) for feeding into the network later
            X_train = X_train.reshape(X_train.shape[0], 113, 113, 1)
            X_train = X_train.astype('float32')
            X_train /= 255
            # One hot encode y
            y_train = to_categorical(y_train, num_classes)
            yield shuffle(X_train, y_train)  # literally shuffle
train_generator = generate_data(train_files, train_targets, batch_size=batch_size, factor=0.3)
validation_generator = generate_data(validation_files, validation_targets, batch_size=batch_size, factor=0.3)
test_generator = generate_data(test_files, test_targets, batch_size=batch_size, factor=0.1)
history_object = model.fit_generator(train_generator, steps_per_epoch=samples_per_epoch1,
                                     validation_data=validation_generator,
                                     nb_val_samples=nb_val_samples, nb_epoch=nb_epoch, verbose=1, callbacks=callbacks_list)
The error log is as follows:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-54937a660f6c> in <module>
1 history_object = model.fit_generator(train_generator, steps_per_epoch= samples_per_epoch1,
2 validation_data=validation_generator,
----> 3 nb_val_samples=nb_val_samples, nb_epoch=nb_epoch, verbose=1, callbacks=callbacks_list)
TypeError: fit_generator() got an unexpected keyword argument 'nb_val_samples'
Answer 0 (score: 2)
After Keras 2.0, the nb_val_samples keyword was renamed to validation_steps. Also, I see the nb_epoch keyword in your code; it was renamed to epochs.
If you don't want to change the keywords, just downgrade Keras to a version below 2.0.
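For reference, here is a minimal sketch of the same call with the post-2.0 keyword names. The variable names (samples_per_epoch1, nb_val_samples, nb_epoch, callbacks_list) are taken from the question and are defined in code the asker omitted; dividing by batch_size is an assumption, since validation_steps counts validation batches whereas the old nb_val_samples counted samples.
# Sketch: the fit_generator call with Keras >= 2.0 keyword names
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=samples_per_epoch1,
    validation_data=validation_generator,
    validation_steps=nb_val_samples // batch_size,  # was nb_val_samples (assumed sample count -> batches)
    epochs=nb_epoch,                                # was nb_epoch
    verbose=1,
    callbacks=callbacks_list)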