I am trying to run modified Python code related to the CNN in this repository: https://github.com/nephashi/GaitRecognitionCNN
It fails with the following error:

File "/root/PycharmProjects/CNNCheck/Run_CNN.py", line 60, in <module>
    model.add(Conv2D121(8, (5, 5), padding='valid'))
File "/root/PycharmProjects/CNNCheck/layers/Conv2D121.py", line 35, in __init__
    self.data_format = conv_utils.normalize_data_format(data_format)
AttributeError: module 'keras.utils.conv_utils' has no attribute 'normalize_data_format'

I created a Python project named CNNCheck containing a Python file named Run_CNN.py and a directory named layers, which in turn contains a file named Conv2D121.py, with the following code:
Run_CNN.py
import keras
from keras.layers import Activation, Dense
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import cv2
import os
from layers.Conv2D121 import Conv2D121
path1="/home/sanjay/CASIA_B90PerfectCentrallyAlinged_Resized_with_140by140_Energy_Image/"
#path1="/home/sanjay/CASIA_B90PerfectCentrallyAlinged_CODEI_OneCycle_frames-20_Resized_with_140by140_Energy_Image/"
all_images = []
all_labels = []
subjects = os.listdir(path1)
numberOfSubject = len(subjects)
print('Number of Subjects: ', numberOfSubject)
# Training set: for each subject, use the energy images from sequence index 4 onward
for number1 in range(0, numberOfSubject):  # numberOfSubject
    path2 = path1 + subjects[number1] + '/'
    sequences = os.listdir(path2)
    numberOfsequences = len(sequences)
    for number2 in range(4, numberOfsequences):
        path3 = path2 + sequences[number2]
        img = cv2.imread(path3, 0)  # read as grayscale
        img = img.reshape(140, 140, 1)
        all_images.append(img)
        all_labels.append(number1)
x_train = np.array(all_images)
y_train = np.array(all_labels)
y_train = keras.utils.to_categorical(y_train)
all_images = []
all_labels = []
# Test set: for each subject, use the first four sequences (indices 0-3)
for number1 in range(0, numberOfSubject):  # numberOfSubject
    path2 = path1 + subjects[number1] + '/'
    sequences = os.listdir(path2)
    numberOfsequences = len(sequences)
    for number2 in range(0, 4):
        path3 = path2 + sequences[number2]
        img = cv2.imread(path3, 0)  # read as grayscale
        img = img.reshape(140, 140, 1)
        all_images.append(img)
        all_labels.append(number1)
x_test = np.array(all_images)
y_test = np.array(all_labels)
y_test = keras.utils.to_categorical(y_test)
batch_size = 4
num_classes = 123
epochs = 10000
model = Sequential()
model.add(Conv2D(8, (5, 5), padding='valid',
                 input_shape=(140, 140, 1)))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='valid'))
model.add(Conv2D121(8, (5, 5), padding='valid'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='valid'))
model.add(Conv2D121(8, (5, 5), padding='valid'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='valid'))
model.add(Conv2D121(8, (5, 5), padding='valid'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='valid'))
model.add(Flatten())
model.add(Dense(num_classes, input_shape=(200,)))
model.add(Activation('softmax'))
model.summary()
Conv2D121.py
from keras import backend as K
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine.base_layer import Layer
from keras.engine.base_layer import InputSpec
from keras.utils import conv_utils
# 121 means one-to-one connection :)
class Conv2D121(Layer):
    def __init__(self, filters,
                 kernel_size,
                 strides=1,
                 rank=2,
                 padding='valid',
                 data_format=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs
                 ):
        super(Conv2D121, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        # normalize_padding: validates the padding value; only 'valid', 'same' and 'causal' are accepted
        self.padding = conv_utils.normalize_padding(padding)
        # normalize_data_format: validates the data_format value
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        self.input_dim = input_dim
        if input_dim != self.filters:
            raise ValueError('Because nature of one-to-one connnection, '
                             'input dimension must be equal to filters number')
        kernel_shape = self.kernel_size + (1, 1)
        self.kernels = []
        for i in range(input_dim):
            self.kernels.append(self.add_weight(
                shape=kernel_shape,
                initializer=self.kernel_initializer,
                name='kernel' + str(i),
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint
            ))
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.input_dim,),
                initializer=self.bias_initializer,
                name='bias',
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint
            )
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
    def call(self, inputs, **kwargs):
        if self.rank != 2:
            raise ValueError('currently this layer only support 2D data.')
        input_slices = []
        # now we need to slice the input_dim dimension input and do convolution
        for i in range(self.input_dim):
            slice = K.expand_dims(inputs[:, :, :, i], axis=3)
            input_slices.append(slice)
        output_slices = []
        for i in range(self.input_dim):
            slice = K.conv2d(
                input_slices[i],
                self.kernels[i],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
            )
            output_slices.append(slice)
        output = K.concatenate(output_slices, axis=3)
        if self.use_bias:
            output = K.bias_add(
                output,
                self.bias,
                data_format=self.data_format
            )
        return output
    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i])
                new_space.append(new_dim)
            return (input_shape[0],) + tuple(new_space) + (self.filters,)
        if self.data_format == 'channels_first':
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i])
                new_space.append(new_dim)
            return (input_shape[0], self.filters) + tuple(new_space)
How can we resolve this error?

File "/root/PycharmProjects/CNNCheck/Run_CNN.py", line 60, in <module>
    model.add(Conv2D121(8, (5, 5), padding='valid'))
File "/root/PycharmProjects/CNNCheck/layers/Conv2D121.py", line 35, in __init__
    self.data_format = conv_utils.normalize_data_format(data_format)
AttributeError: module 'keras.utils.conv_utils' has no attribute 'normalize_data_format'
Answer (score: 0):

This was a Keras version problem. I replaced Keras 2.2.4 with Keras 2.2.0, and the issue was resolved.
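If you would rather stay on a newer Keras than downgrade, the error can usually be avoided by changing where normalize_data_format is imported from in Conv2D121.py. Around Keras 2.2.1 the helper was moved out of keras.utils.conv_utils and, as far as I know, is exposed through keras.backend in later 2.2.x releases. A minimal sketch (assuming Keras 2.2.x; not tested against every release):

# Compatibility import for normalize_data_format (sketch).
# Keras <= 2.2.0 keeps the helper in keras.utils.conv_utils;
# later 2.2.x releases expose it via keras.backend instead.
try:
    from keras.utils.conv_utils import normalize_data_format
except ImportError:
    from keras.backend import normalize_data_format

# ...and in Conv2D121.__init__ call the imported function directly:
#     self.data_format = normalize_data_format(data_format)

Alternatively, pinning the version explicitly with pip install keras==2.2.0 reproduces an environment where keras.utils.conv_utils still provides normalize_data_format.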