I want to test a TensorFlow classifier with several optimizers, using this code:
optimizers = [
    tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdadeltaOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.FtrlOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalGradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalAdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.RMSPropOptimizer(learning_rate).minimize(cross_entropy)]
for optimizer in optimizers:
    print(optimizer)
I get this error:
TypeError: __init__() missing 1 required positional argument: 'name'
Please help.
Answer 0 (score: 1)
Following the MNIST tutorial on tensorflow.org and combining it with your array of optimizers, I was able to get an accuracy figure for every one of them. The error message you are getting appears to come from somewhere else.
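One common trigger for that exact message (an assumption on my part, since your full traceback is not shown) is instantiating the abstract tf.train.Optimizer base class instead of one of its concrete subclasses:

# Hypothetical reproduction of the error; this is not code from the question.
# tf.train.Optimizer.__init__ takes (use_locking, name), so the call below binds
# learning_rate to use_locking and leaves the required 'name' argument unfilled.
opt = tf.train.Optimizer(0.5)  # TypeError: __init__() missing 1 required positional argument: 'name'

The concrete classes in your list (GradientDescentOptimizer, AdamOptimizer, and so on) take a learning rate as their first argument and do not raise this error.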
Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images
W = tf.Variable(tf.zeros([784, 10]))          # weights and biases of the softmax regression
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)        # predicted class probabilities
y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot target labels
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
learning_rate = 0.5
optimizers = [
    tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdadeltaOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.FtrlOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalGradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalAdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.RMSPropOptimizer(learning_rate).minimize(cross_entropy)]
for optimizer in optimizers:
    # fresh session and re-initialized variables for each optimizer
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # train on 1000 mini-batches of 100 examples
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(optimizer, feed_dict={x: batch_xs, y_: batch_ys})
    # evaluate accuracy on the test set
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Output:
0.9157
0.8832
0.9169
0.098
0.917
0.9149
0.917
0.098
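Since the accuracies are printed in list order, the two 0.098 values belong to AdamOptimizer and RMSPropOptimizer. A plausible explanation is that a learning rate of 0.5 is far too large for these adaptive optimizers, whose usual default is around 0.001, so they diverge. A minimal sketch of one way to handle that, assuming you want a per-optimizer learning rate (the pairs and rates below are illustrative assumptions, not part of the original answer):

# Pair each optimizer class with a learning rate that is more typical for it,
# then build the training ops from those pairs. The rates are illustrative only.
optimizer_configs = [
    (tf.train.GradientDescentOptimizer, 0.5),
    (tf.train.AdamOptimizer, 0.001),
    (tf.train.RMSPropOptimizer, 0.001),
]
train_ops = [cls(lr).minimize(cross_entropy) for cls, lr in optimizer_configs]

The rest of the training loop stays the same; you would simply iterate over train_ops instead of optimizers.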