I have tried different solutions but I am still facing the problem. I am actually new to ML/DL (Python). In which situations do we get the error "Can not squeeze dim[1], expected a dimension of 1, got 5"? Please help me: what am I doing wrong here, and what is the correct way to do it?
Here is the error:
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-9-0826122252c2> in <module>()
98 model.summary()
99 model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
--> 100 history=model.fit(trainX, trainY, batch_size=32, epochs=10, verbose=1)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/client/session.py in __call__(self, *args, **kwargs)
1470 ret = tf_session.TF_SessionRunCallable(self._session._session,
1471 self._handle, args,
-> 1472 run_metadata_ptr)
1473 if run_metadata:
1474 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
InvalidArgumentError: Can not squeeze dim[1], expected a dimension of 1, got 5
[[{{node metrics_4/acc/Squeeze}}]]
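For reference, sparse_categorical_crossentropy together with the 'accuracy' metric expects exactly one integer class label per sample, i.e. targets of shape (batch,) or (batch, 1); the metric's Squeeze op fails when the targets instead carry 5 values per sample, which is what the code below produces. A minimal sketch with made-up data (the shapes here are illustrative, not taken from the question):
import numpy as np
import tensorflow as tf

# tiny hypothetical model, only to show the label shape this loss/metric pair expects
m = tf.keras.Sequential([tf.keras.layers.Dense(15, activation='softmax', input_shape=(8,))])
m.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

x = np.random.rand(32, 8).astype('float32')
y_ok = np.random.randint(0, 15, size=(32,))     # one integer class per sample -> shape (32,)
y_bad = np.random.randint(1, 4, size=(32, 5))   # five values per sample -> shape (32, 5)

m.fit(x, y_ok, epochs=1, verbose=0)             # trains without error
# m.fit(x, y_bad, epochs=1, verbose=0)          # fails with "Can not squeeze dim[1], ... got 5"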
Here is my code:
# create a mapping of tags to integers given the loaded mapping file
def create_tag_mapping(mapping_csv):
    # create a set of all known tags
    labels = set()
    for i in range(len(mapping_csv)):
        # convert space separated tags into an array of tags
        tags = mapping_csv['Labels'][i].split(' ')
        # add tags to the set of known labels
        labels.update(tags)
    # convert the set of labels to a list and order it alphabetically
    labels = list(labels)
    labels.sort()
    # dicts that map labels to integers, and the reverse
    labels_map = {labels[i]: i for i in range(len(labels))}
    inv_labels_map = {i: labels[i] for i in range(len(labels))}
    return labels_map, inv_labels_map
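For illustration, assuming the mapping CSV has an 'Id' column and a 'Labels' column of space-separated tags (the two rows and the tag names below are made up), the function returns something like:
import pandas as pd

# hypothetical two-row mapping file with 5 space-separated tags per image
sample_csv = pd.DataFrame({'Id': ['img001', 'img002'],
                           'Labels': ['L M H M L', 'H H L M M']})
labels_map, inv_labels_map = create_tag_mapping(sample_csv)
print(labels_map)      # {'H': 0, 'L': 1, 'M': 2}
print(inv_labels_map)  # {0: 'H', 1: 'L', 2: 'M'}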
# create a mapping of filename to tags
def create_file_mapping(mapping_csv):
    mapping = dict()
    for i in range(len(mapping_csv)):
        name, tags = mapping_csv['Id'][i], mapping_csv['Labels'][i]
        mapping[name] = tags.split(' ')
    return mapping
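With the same made-up rows as above, create_file_mapping gives one tag list per file name, five tags per image:
file_mapping_sample = create_file_mapping(sample_csv)
print(file_mapping_sample)
# {'img001': ['L', 'M', 'H', 'M', 'L'], 'img002': ['H', 'H', 'L', 'M', 'M']}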
# create an encoding for one list of tags
def custom_encode(tags, mapping):
    # build a list with one integer per tag
    encoding = []
    for tag in tags:
        if tag == 'L':
            encoding.append(1)
        elif tag == 'M':
            encoding.append(2)
        else:
            encoding.append(3)
    return encoding
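Note that this returns one integer per tag rather than a single class index, and the mapping argument is never used. For a row with 5 tags the target therefore has length 5, which is exactly the "got 5" in the error above. A quick check with the hypothetical tags from before:
target = custom_encode(['L', 'M', 'H', 'M', 'L'], None)  # the mapping argument is ignored here
print(target)       # [1, 2, 3, 2, 1]
print(len(target))  # 5 -> every sample ends up with 5 labels instead of 1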
# load all images and build the corresponding target array
def load_dataset(path, file_mapping, tag_mapping):
    photos, targets = list(), list()
    # enumerate files in the directory
    for filename in os.listdir(path):
        # load image
        photo = load_img(path + filename, target_size=(760, 415))
        # convert to numpy array
        photo = img_to_array(photo, dtype='uint8')
        # get the tags for this file (filename without its extension)
        tags = file_mapping[filename[:-4]]
        # encode the tags as integers (see custom_encode above)
        target = custom_encode(tags, tag_mapping)
        # store
        photos.append(photo)
        targets.append(target)
    X = np.asarray(photos, dtype='uint8')
    y = np.asarray(targets, dtype='uint8')
    return X, y
# define location of dataset
trainingLabels = csvPath
# load the mapping file
mapping_csv = pd.read_csv(trainingLabels)
# create a mapping of tags to integers
tag_mapping, _ = create_tag_mapping(mapping_csv)
# create a mapping of filenames to tag lists
file_mapping = create_file_mapping(mapping_csv)
# load the png images
folder = '/dataset/'
X, y = load_dataset(folder, file_mapping, tag_mapping)
print(X.shape, y.shape)
trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.3, random_state=1)
print(trainX.shape, trainY.shape, testX.shape, testY.shape)
img_x, img_y = 760, 415
model = Sequential()
model.add(Conv2D(32, (5, 5), strides=(1,1), activation='relu', input_shape=(img_x, img_y,3)))
model.add(MaxPooling2D((2, 2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(15))
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(trainX, trainY, batch_size=32, epochs=10, verbose=1)
Answer 0 (score: 0)
I was facing this error because I have 5 attributes in total and each attribute has 3 possible tags. In that situation the error goes away if we encode the labels like this instead:
def custom_encode(tags, mapping):
    # build a list with one one-hot row per tag
    encoding = []
    for tag in tags:
        if tag == 'L':
            encoding.append([1, 0, 0])
        elif tag == 'M':
            encoding.append([0, 1, 0])
        else:
            encoding.append([0, 0, 1])
    return encoding
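With this encoding each image's target becomes a 5x3 one-hot matrix, so the full target array has shape (num_samples, 5, 3). A quick check with the same hypothetical tags:
import numpy as np

target = custom_encode(['L', 'M', 'H', 'M', 'L'], None)  # mapping is still unused
print(np.asarray(target).shape)  # (5, 3) -> one one-hot row per attribute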
and build the final layers like this:
model.add(Dense(15))              # 5 attributes with 3 possible tags each, so 15 neurons in the final Dense layer
model.add(Reshape((5, 3)))        # reshape so each of the 5 attributes gets its own row of 3 scores
model.add(Activation('softmax'))  # softmax over the last axis: a 3-way choice per attribute
model.compile(optimizer=opt, loss='categorical_crossentropy',
              metrics=['accuracy'])
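As a sanity check (a sketch with random data, not the real dataset; it assumes the model above has been built on top of the question's Conv2D/Flatten layers and that opt is a defined optimizer such as 'adam'), the output and target shapes now line up, so the Squeeze error disappears:
import numpy as np

print(model.output_shape)  # (None, 5, 3)

# hypothetical batch of 4 one-hot targets: 5 attributes with 3 possible tags each
dummy_y = np.eye(3)[np.random.randint(0, 3, size=(4, 5))]
print(dummy_y.shape)       # (4, 5, 3) -> matches the model output, one softmax row per attribute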