python-ValueError:检查目标时出错:预期activation_19的形状为(2,),但数组的形状为(1,)

时间:2018-07-12 23:08:17

标签: python tensorflow machine-learning keras conv-neural-network

我正在用Keras构建一个CNN,将面部表情分为7类,运行得很好。问题是当我尝试将数据集缩减为仅2种面部表情时,出现了标题中的错误:

ValueError: Error when checking target: expected activation_19 to have shape (2,) but got array with shape (1,)

当我像这样缩减类别数时,我不确定为什么训练目标会变成形状为(1,)的数组,这是我的模型:

@staticmethod
def buildDeeperCNN(width, height, depth, classes, n, m, l2rate, dropout_rate):
    """Build a deep CNN for image (facial-expression) classification.

    Architecture: one stem conv block, then ``n`` repetitions of a
    32-filter double-conv block (each ending in 2x2 max-pooling and
    dropout), then ``m`` repetitions of a 64-filter double-conv block,
    then three fully-connected layers (512/256/128) and a softmax head.

    Args:
        width: input image width in pixels.
        height: input image height in pixels.
        depth: number of input channels.
        classes: number of output classes (units in the softmax layer).
        n: repetitions of the 32-filter conv block.
        m: repetitions of the 64-filter conv block.
        l2rate: L2 regularization factor applied to kernels and biases.
        dropout_rate: dropout probability used after each block.

    Returns:
        An uncompiled Keras ``Sequential`` model.

    NOTE(review): because the head is ``Dense(classes)`` + softmax, the
    training targets must be one-hot encoded (shape ``(classes,)`` per
    sample, e.g. via ``keras.utils.to_categorical``). Feeding integer
    labels of shape ``(1,)`` triggers the "expected shape (2,) but got
    array with shape (1,)" error; alternatively compile with
    ``loss='sparse_categorical_crossentropy'`` and keep integer labels.
    """
    model = Sequential()

    # Arrange the input shape (and BatchNormalization axis) to match the
    # backend's image data format.
    if K.image_data_format() == 'channels_first':
        input_shape = (depth, height, width)
        chan_idx = 1   # channel axis for NCHW
    else:
        input_shape = (height, width, depth)
        chan_idx = -1  # channel axis for NHWC

    # Stem block: single conv + BN + ReLU.
    model.add(Conv2D(32, (3, 3), input_shape = input_shape,
                     padding = 'same', strides = 1, use_bias = True,
                     kernel_initializer = 'he_normal',
                     bias_initializer = 'he_normal',
                     kernel_regularizer = l2(l2rate),
                     bias_regularizer = l2(l2rate)))
    model.add(BatchNormalization(axis = chan_idx))
    model.add(Activation('relu'))

    # Second stage, repeated n times.
    # BUG FIX: the original used `for n in range(0, n)`, which shadows
    # and clobbers the `n` parameter (and reused `n` again for the `m`
    # loop below). Use throwaway loop variables instead.
    for _ in range(n):
        model.add(Conv2D(32, (3, 3), padding = 'same', strides = 1,
                         kernel_initializer='he_normal',
                         bias_initializer='he_normal',
                         kernel_regularizer = l2(l2rate),
                         bias_regularizer = l2(l2rate)))
        model.add(BatchNormalization(axis = chan_idx))
        model.add(Activation('relu'))

        model.add(Conv2D(32, (3, 3), padding = 'same', strides = 1,
                         kernel_initializer='he_normal',
                         bias_initializer='he_normal',
                         kernel_regularizer = l2(l2rate),
                         bias_regularizer = l2(l2rate)))
        model.add(BatchNormalization(axis = chan_idx))
        model.add(Activation('relu'))

        model.add(MaxPooling2D(pool_size = (2, 2), strides = 2))
        model.add(Dropout(dropout_rate))

    # Third stage, repeated m times.
    for _ in range(m):
        model.add(Conv2D(64, (3, 3), padding = 'same', strides = 1,
                         kernel_initializer='he_normal',
                         bias_initializer='he_normal',
                         kernel_regularizer = l2(l2rate),
                         bias_regularizer = l2(l2rate)))
        model.add(BatchNormalization(axis = chan_idx))
        model.add(Activation('relu'))

        model.add(Conv2D(64, (3, 3), padding = 'same', strides = 1,
                         kernel_initializer='he_normal',
                         bias_initializer='he_normal',
                         kernel_regularizer = l2(l2rate),
                         bias_regularizer = l2(l2rate)))
        model.add(BatchNormalization(axis = chan_idx))
        model.add(Activation('relu'))

        # strides=1 here keeps the spatial size nearly unchanged,
        # unlike the strides=2 pooling in the previous stage.
        model.add(MaxPooling2D(pool_size = (2, 2), strides = 1))
        model.add(Dropout(dropout_rate))

    # Classifier head: three dense layers with BN/ReLU/dropout.
    model.add(Flatten())
    model.add(Dense(512, kernel_initializer='he_normal',
                    bias_initializer='he_normal',
                    kernel_regularizer = l2(l2rate),
                    bias_regularizer = l2(l2rate)))
    model.add(BatchNormalization(axis = chan_idx))
    model.add(Activation('relu'))
    model.add(Dropout(dropout_rate))

    model.add(Dense(256, kernel_initializer='he_normal',
                    bias_initializer='he_normal',
                    kernel_regularizer = l2(l2rate),
                    bias_regularizer = l2(l2rate)))
    model.add(BatchNormalization(axis = chan_idx))
    model.add(Activation('relu'))
    model.add(Dropout(dropout_rate))

    model.add(Dense(128, kernel_initializer='he_normal',
                    bias_initializer='he_normal',
                    kernel_regularizer = l2(l2rate),
                    bias_regularizer = l2(l2rate)))
    model.add(BatchNormalization(axis = chan_idx))
    model.add(Activation('relu'))
    model.add(Dropout(dropout_rate))

    # Softmax output over `classes` units; see the docstring note about
    # the required one-hot target encoding.
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    return model

我尝试搜索这个具体问题但无济于事。我不确定为什么Keras试图将我的标签(形状为(1,)的数组)匹配到形状为(2,)的输出上,因为我已经指定最后一层有两个节点。对于解决此问题的任何帮助将不胜感激。

0 个答案:

没有答案