ValueError: Dimensions must be equal, but are 1 and 28 for '{{node MatMul_63}}'

Asked: 2020-10-18 19:17:42

Tags: python tensorflow deep-learning neural-network conv-neural-network

I really don't know much about neural networks. What other requirements does the following code snippet still need to satisfy?

Error:

ValueError: Dimensions must be equal, but are 1 and 28 for '{{node MatMul_63}} = BatchMatMulV2[T=DT_FLOAT, adj_x=false, adj_y=false](Reshape_11, Variable_318/read)' with input shapes: [60000,28,28,1], [28,256].
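The node name in the traceback shows that tf.matmul was lowered to BatchMatMulV2: once x is reshaped to [60000, 28, 28, 1], every tf.matmul in the model becomes a batched matrix multiply, and the last axis of the input (size 1) must equal the first axis of the [28, 256] weight matrix. A minimal sketch that reproduces the mismatch (assuming the TF 1.x graph API, as in the code below; the shapes mirror n_dim = 28):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28])
x = tf.reshape(x, shape=[60000, 28, 28, 1])      # rank-4 tensor, last axis = 1
w = tf.Variable(tf.truncated_normal([28, 256]))
# BatchMatMulV2 requires x.shape[-1] == w.shape[0], i.e. 1 == 28 -> ValueError
y = tf.matmul(x, w)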

Code:

import numpy as np
import tensorflow as tf  # the snippet uses the TF 1.x graph API (placeholders, tf.train)

# Parameters
learning_rate = 0.001
training_epochs = 2000
cost_history = np.empty(shape= [1], dtype = float)
n_dim = X_train.shape[1]
batch_size = 100
#display_step = 1

# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_hidden_3 = 256 # 3rd layer number of features
n_hidden_4 = 256 # 4th layer number of features
n_class = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder(tf.float32, [None ,n_dim])
x = tf.reshape(x, shape=[60000, n_dim,n_dim,1])
w = tf.Variable(tf.zeros([n_dim, n_class]))
b = tf.Variable(tf.zeros([n_class]))
y_ = tf.placeholder(tf.float32, [None, n_class])



# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with Sigmoid activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.sigmoid(layer_1)
    # Hidden layer Sigmoid activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.sigmoid(layer_2)

    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.sigmoid(layer_3)

    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.sigmoid(layer_4)

    # Output layer with linear activation
    out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
    return out_layer

print(x.shape)
print(X_train.shape)

weights = {
    'h1':tf.Variable(tf.truncated_normal([n_dim, n_hidden_1])),
    'h2':tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
    'h3':tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
    'h4':tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
    'out':tf.Variable(tf.truncated_normal([n_hidden_4, n_class]))
}
bias = {
    'b1': tf.Variable(tf.truncated_normal([n_hidden_1])),
    'b2': tf.Variable(tf.truncated_normal([n_hidden_2])),
    'b3': tf.Variable(tf.truncated_normal([n_hidden_3])),
    'b4': tf.Variable(tf.truncated_normal([n_hidden_4])),
    'out': tf.Variable(tf.truncated_normal([n_class]))
}
# Construct model
pred = multilayer_perceptron(x, weights, bias)

cost = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()
#create an empty list to store the cost history and accuracy history
cost_history = []
accuracy_history = []
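
For reference, here is a minimal sketch of one way to make the shapes consistent, assuming the intent is a plain MLP on flattened MNIST vectors rather than a convolutional network (the flattening call, the [784, n_hidden_1] first-layer weight, and the tf.reduce_mean around the loss are assumptions, not something stated in the original post):

# Flatten each 28x28 image into a 784-dimensional vector
X_train = X_train.reshape(-1, 28 * 28).astype('float32')
n_dim = X_train.shape[1]                          # 784

x  = tf.placeholder(tf.float32, [None, n_dim])    # keep x rank-2; drop the 4-D reshape
y_ = tf.placeholder(tf.float32, [None, n_class])

# First-layer weights must match the flattened input: [784, 256]
weights['h1'] = tf.Variable(tf.truncated_normal([n_dim, n_hidden_1]))
pred = multilayer_perceptron(x, weights, bias)

# reduce_mean collapses the per-example losses into a scalar for the optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)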
