运行以下代码,但它不起作用。变量cost
总是一个充满1.0的张量,但为什么呢?我估计了一个标量,因为1x5矩阵乘以5x1矩阵是一个标量。优化时偏差和重量也不会改变。我做错了什么?
#KI-Model
x = tf.placeholder(tf.float32, [None, 5], name='input') #x_1-x_5
#Init the graph
W = tf.Variable(tf.zeros([5,1]))
b = tf.Variable(tf.zeros([1]))
#activation with sigmoid
y = tf.nn.sigmoid(tf.matmul(x, W) + b) #berechneter Wert für y
#Training
y_tensor = tf.placeholder(tf.float32, [None, 1], name='output')
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_tensor * tf.log(y), reduction_indices=[1])) #Hier Cross-Entropie statt minimum squares method
loss = y-y_tensor
cost = tf.square(loss)
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
#Start
session = tf.Session() #Google-> was ist das?
init = tf.global_variables_initializer()
session.run(init)
#init first 1000 training_batches
for i in range(1000):
batch_xs.append([dataA[i], dataB[i], dataC[i], dataD[i],
dataE[i]])
batch_ys.append(dataG[i])
for i in range(10000):
session.run(optimizer, feed_dict={x: batch_xs, y_tensor: batch_ys})
print(session.run(cost, feed_dict={x: batch_xs, y_tensor: batch_ys}))