I seem to be messing up the steps for preparing the dataset. I can't find the right answer or locate the correct solution in the documentation. I have marked the problem spot with ### below.
def parse_file(data_path):
    imagepaths = list()
    labels = list()
    # a working parser for os is here
    imagepaths = tf.constant(imagepaths, dtype=tf.string)
    labels = tf.constant(labels, dtype=tf.float32)
    return imagepaths, labels

def parse_image(imagepath, label):
    image_string = tf.read_file(imagepath)
    image_decoded = tf.image.decode_png(image_string, channels=3)
    # The image size is 425x425.
    image_resized = tf.image.resize_images(image_decoded, [img_size, img_size])
    image_normalized = image_resized * 1.0 / 255
    print(image_normalized)
    print(label)
    return image_normalized, label

dataset = tf.data.Dataset.from_tensor_slices((parsed_files))
dataset = dataset.map(parse_image)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
iterator = iterator.get_next()

x = tf.placeholder(tf.float32, [None, img_size, img_size, channels])
y = tf.placeholder(tf.float32, [None, 1])
(The model itself is irrelevant here.)
with tf.Session() as sess:
    ### AttributeError: 'tuple' object has no attribute 'initializer'
    sess.run(iterator.initializer)
    batch_x, batch_y = iterator.get_next()
    test1, test2 = sess.run([batch_x, batch_y])

    total_batch = int(total_input[0] / batch_size)
    # define the iterator for the network
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = sess.run(iterator)
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
            avg_cost += c / total_batch
        test_acc = sess.run(accuracy, feed_dict={x: test_x, y: np.expand_dims(test_y, axis=-1)})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
        summary = sess.run(merged, feed_dict={x: test_x, y: np.expand_dims(test_y, axis=-1)})

    print("\nTraining complete!")
    print(sess.run(accuracy, feed_dict={x: test_x, y: np.expand_dims(test_y, axis=-1)}))
Answer (score: 1)
I don't have much experience with tf.data.Dataset, but this is probably what goes wrong:
iterator = dataset.make_initializable_iterator()
iterator = iterator.get_next()
First you create an iterator, and then you overwrite it with the result of its .get_next() method, which queries data from it. That apparently gives you a tuple. Then, when you do
sess.run(iterator.initializer)
you get the error message, because iterator is no longer the iterator returned by make_initializable_iterator(). Have you tried:
iterator = dataset.make_initializable_iterator()
with tf.Session() as sess:
    sess.run(iterator.initializer)
You might run into more errors after that, and I could be wrong here, since I'm not used to working with tf.data.Dataset.
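To make that concrete, here is a minimal sketch of how the pipeline and training step could be wired up once the iterator and its get_next() op are kept as separate objects. It reuses parse_file/parse_image and the other names from the question (data_path, optimiser, cross_entropy, x, y), and the values for img_size and batch_size are just assumed placeholders, so treat it as an illustration rather than a verified drop-in replacement:

import tensorflow as tf

img_size = 128    # assumed value; the question resizes 425x425 PNGs to img_size
batch_size = 32   # assumed value

# Build the dataset as in the question, but keep the iterator
# and the tensors it produces in two separate variables.
imagepaths, labels = parse_file(data_path)
dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
dataset = dataset.map(parse_image)
dataset = dataset.batch(batch_size)

iterator = dataset.make_initializable_iterator()   # the Iterator object
next_element = iterator.get_next()                  # the (images, labels) tensors it yields

with tf.Session() as sess:
    sess.run(iterator.initializer)                  # works: iterator was not overwritten
    while True:
        try:
            batch_x, batch_y = sess.run(next_element)   # numpy arrays for one batch
            # feed them into the placeholders, e.g.:
            # _, c = sess.run([optimiser, cross_entropy],
            #                 feed_dict={x: batch_x, y: batch_y})
        except tf.errors.OutOfRangeError:           # dataset exhausted -> end of epoch
            break

Re-running sess.run(iterator.initializer) at the start of each epoch restarts the dataset from the beginning, which would replace the manual total_batch counting in the question.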
Take a look at this example, which I found here:
max_value = tf.placeholder(tf.int64, shape=[])
dataset = tf.data.Dataset.range(max_value)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Initialize an iterator over a dataset with 10 elements.
sess.run(iterator.initializer, feed_dict={max_value: 10})
for i in range(10):
    value = sess.run(next_element)
    assert i == value
# Initialize the same iterator over a dataset with 100 elements.
sess.run(iterator.initializer, feed_dict={max_value: 100})
for i in range(100):
    value = sess.run(next_element)
    assert i == value
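The pattern is the same as above: iterator and next_element stay distinct objects, iterator.initializer is the op you run to (re)initialize the pipeline, and next_element is the op you run once per step to pull the next value.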