How do I reshape the input for training on the CIFAR dataset?

Time: 2017-12-01 12:32:40

Tags: python keras

Here is my code:

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Convert and pre-processing
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    model = Sequential()
    model.add(Dense(10, input_shape=x_train.shape[1:], activation='relu'))
    model.add(Dense(10, input_shape=x_train.shape[1:], activation='softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    model.summary()
    earlyStopping = EarlyStopping(monitor='val_loss', patience=0, verbose=2, mode='auto')
    checkpoint = ModelCheckpoint("filepath", monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    history = History()
    print("Training...")
    model.fit(x_train, y_train, batch_size=32, nb_epoch=1000, verbose=2,
              validation_split=0.2, callbacks=[history, earlyStopping, checkpoint])

But it gives an error. Here is the traceback:

ValueError                                Traceback (most recent call last)
<ipython-input-18-9ea5b53744c6> in <module>()
      1 model = Sequential()
----> 2 model.add(Dense(10, input_shape=x_train.shape[1:], activation='relu'))
      3 model.add(Dense(10, input_shape=x_train.shape[1:],activation='softmax'))
      4 
      5 

/home/fatima/anaconda2/lib/python2.7/site-packages/keras/models.pyc in add(self, layer)
    440                 # and create the node connecting the current layer
    441                 # to the input layer we just created.
--> 442                 layer(x)
    443 
    444             if len(layer.inbound_nodes) != 1:

/home/fatima/anaconda2/lib/python2.7/site-packages/keras/engine/topology.pyc in __call__(self, inputs, **kwargs)
    601 
    602             # Actually call the layer, collecting output(s), mask(s), and shape(s).
--> 603             output = self.call(inputs, **kwargs)
    604             output_mask = self.compute_mask(inputs, previous_mask)
    605 

/home/fatima/anaconda2/lib/python2.7/site-packages/keras/layers/core.pyc in call(self, inputs)
    843         output = K.dot(inputs, self.kernel)
    844         if self.use_bias:
--> 845             output = K.bias_add(output, self.bias)
    846         if self.activation is not None:
    847             output = self.activation(output)

/home/fatima/anaconda2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in bias_add(x, bias, data_format)
   3556         if data_format == 'channels_first':
   3557             if len(bias_shape) == 1:
-> 3558                 x += reshape(bias, (1, bias_shape[0], 1, 1))
   3559             else:
   3560                 x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.pyc in binary_op_wrapper(x, y)
    863           else:
    864             raise
--> 865       return func(x, y, name=name)
    866 
    867   def binary_op_wrapper_sparse(sp_x, y):

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.pyc in add(x, y, name)
     78     A `Tensor`. Has the same type as `x`.
     79   """
---> 80   result = _op_def_lib.apply_op("Add", x=x, y=y, name=name)
     81   return result
     82 

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.pyc in apply_op(self, op_type_name, name, **keywords)
    765         op = g.create_op(op_type_name, inputs, output_types, name=scope,
    766                          input_types=input_types, attrs=attr_protos,
--> 767                          op_def=op_def)
    768         if output_structure:
    769           outputs = op.outputs

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
   2630                     original_op=self._default_original_op, op_def=op_def)
   2631     if compute_shapes:
-> 2632       set_shapes_for_outputs(ret)
   2633     self._add_op(ret)
   2634     self._record_op_seen_by_control_dependencies(ret)

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc in set_shapes_for_outputs(op)
   1909       shape_func = _call_cpp_shape_fn_and_require_op
   1910 
-> 1911   shapes = shape_func(op)
   1912   if shapes is None:
   1913     raise RuntimeError(

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc in call_with_requiring(op)
   1859 
   1860   def call_with_requiring(op):
-> 1861     return call_cpp_shape_fn(op, require_shape_fn=True)
   1862 
   1863   _call_cpp_shape_fn_and_require_op = call_with_requiring

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.pyc in call_cpp_shape_fn(op, require_shape_fn)
    593     res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
    594                                   input_tensors_as_shapes_needed,
--> 595                                   require_shape_fn)
    596     if not isinstance(res, dict):
    597       # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).

/home/fatima/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.pyc in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
    657       missing_shape_fn = True
    658     else:
--> 659       raise ValueError(err.message)
    660 
    661   if missing_shape_fn:

ValueError: Dimensions must be equal, but are 3 and 10 for 'dense_14/add' (op: 'Add') with input shapes: [?,3,32,10], [1,10,1,1].

I do realize that this is not a perfect network. But I want to understand the concept of reshaping here (since that, I guess, is the problem). Please tell me how to fix this and what causes the error. Thanks in advance :)

2 answers:

Answer 0 (score: 0)

y_train is an array of shape (50000, 1). You need to convert it to shape (50000, 10) by one-hot encoding the labels.

The 10 units in the layer with the softmax activation correspond to the number of CIFAR-10 classes.

from keras.utils.np_utils import to_categorical

num_classes = 10  # CIFAR-10 has 10 classes
y_train = to_categorical(y_train, num_classes)

You are trying to tackle this image classification problem without convolutional layers. If you try something like the following, it will work fine:

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, BatchNormalization

model = Sequential()
# Two small conv blocks with pooling, then a dense classifier head
model.add(Convolution2D(3, (3, 3), padding='same', input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Convolution2D(3, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 CIFAR-10 classes
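
For completeness, here is a minimal sketch (not part of the original answer) of compiling and training that convolutional model. It assumes x_train and y_train have been preprocessed as in the question and that x_train is in channels_last order, (num_samples, 32, 32, 3), to match the model's input_shape; the optimizer and epoch count are only placeholders:

# Sketch only: optimizer choice and epoch count are placeholders.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=32,
          epochs=10,  # 'epochs' in Keras 2 (was 'nb_epoch' in Keras 1)
          validation_split=0.2,
          verbose=2)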

Answer 1 (score: 0)

I'm not keen on downloading the whole CIFAR dataset onto this laptop, so I'll illustrate with hand-built data.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from keras import optimizers
from keras.callbacks import EarlyStopping

y_train = np.random.randint(0, 10, 100)  # 100 samples with labels 0-9
x_train = np.random.randint(0, 255, 32*32*3*100).reshape(100, -1)  # 100 flattened samples; CIFAR images are 32*32*3
num_classes = y_train.max() + 1  # 10
y_train = np_utils.to_categorical(y_train, num_classes)
x_train = x_train.astype('float32')
x_train /= 255

model = Sequential()
model.add(Dense(10, input_shape=x_train.shape[1:], activation='relu'))
model.add(Dense(10, activation='softmax'))
sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=2, mode='auto')
print("Training...")
model.fit(x_train, y_train, batch_size=32, nb_epoch=1000, verbose=2,
          validation_split=0.2, callbacks=[earlyStopping])

This runs smoothly. The key here is reshaping the x_train data.
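
Applied to the real CIFAR-10 arrays, the same idea looks roughly like the sketch below (not from the original answer; it assumes the same Keras utilities used above). Flattening each image into a single vector makes input_shape=x_train.shape[1:] a 1-D shape, which is what a Dense layer expects; adding a Flatten layer as the first layer of the model is an equivalent alternative.

from keras.datasets import cifar10
from keras.utils import np_utils

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Flatten each 32x32x3 image into a 3072-length vector; this works for both
# channels_first and channels_last data, since only the sample axis is kept.
x_train = x_train.reshape(x_train.shape[0], -1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], -1).astype('float32') / 255

y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

print(x_train.shape)  # (50000, 3072) -- now x_train.shape[1:] == (3072,)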