I am new to TensorFlow and am trying to work through the MNIST example in Python 3.6.
I set up the code to watch how the error changes over a number of iterations, but the sess.run call with a feed_dict gives me an error.
Below is my code:
import tensorflow as tf
import numpy as np
import functools
import sys

sys.path.append('./utils')
from mnist import MNIST


def lazy_property(function):
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return decorator


class Model:
    def __init__(self, image, label):
        self.image = image
        self.label = label
        self.logits
        self.prediction
        self.optimize
        self.error

    @lazy_property
    def logits(self):
        weight = tf.Variable(tf.zeros([img_size_flat, num_classes]))
        #print (img_size_flat)
        biases = tf.Variable(tf.zeros([num_classes]))
        #print (num_classes)
        equation = tf.matmul(self.image, weight) + biases
        return equation

    @lazy_property
    def prediction(self):
        return tf.nn.softmax(self.logits)

    @lazy_property
    def optimize(self):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.label)
        cost = tf.reduce_mean(cross_entropy)
        return tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)

    @lazy_property
    def error(self):
        y_pred_cls = tf.argmax(self.prediction, axis=1)
        mistakes = tf.not_equal(y_true_cls, y_pred_cls)
        #print(mistakes)
        error_from_acc = tf.reduce_mean(tf.cast(mistakes, tf.float32))
        return error_from_acc


batch_size = 100
num_steps = 1000

tf.reset_default_graph()

data = MNIST(data_dir="data/MNIST/")
img_size_flat = data.img_size_flat
img_shape = data.img_shape
num_classes = data.num_classes

image = tf.placeholder(tf.float32, [None, img_size_flat])
label = tf.placeholder(tf.float32, [None, num_classes])
y_true_cls = tf.placeholder(tf.int64, [None])
#print (y_true_cls)

model = Model(image, label)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        error = session.run(model.error, {x: data.x_test, y_true: data.y_test})  # Gives me an error message from HERE!!!!
        x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
        session.run(model.optimize, {x: x_batch, y_true: y_true_batch})
        if (step % 100 == 0):
            print("Error rate @ iter %d : %f" % (step, error))
What am I doing wrong?
Where should I put the feed_dict entries ({x: data.x_test, y_true: data.y_test} and {x: x_batch, y_true: y_true_batch})?
Is there anything else in the code that I got wrong?
Please help me out here.
Thanks.
Answer 0 (score: 1)
You have defined your placeholders as follows:
image = tf.placeholder(tf.float32, [None, img_size_flat])
label = tf.placeholder(tf.float32, [None, num_classes])
but then you pass x and y_true as the feed_dict keys in session.run:
session.run(model.error, {x: data.x_test, y_true: data.y_test})
So you need to replace x and y_true with image and label, and you should be fine:
session.run(model.error, {image : data.x_test, label : data.y_test})
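For completeness, here is a minimal sketch of how the whole training loop could look with those keys swapped. Note that model.error also reads the y_true_cls placeholder, so in this sketch I derive the integer class labels from the one-hot data.y_test with NumPy; if your MNIST helper already exposes integer test labels, feed those instead.

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        # model.error compares y_true_cls against argmax(prediction),
        # so feed the integer class labels in addition to image/label.
        error = session.run(model.error,
                            {image: data.x_test,
                             label: data.y_test,
                             y_true_cls: np.argmax(data.y_test, axis=1)})
        # model.optimize only needs the image and one-hot label placeholders.
        x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
        session.run(model.optimize, {image: x_batch, label: y_true_batch})
        if step % 100 == 0:
            print("Error rate @ iter %d : %f" % (step, error))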