TensorFlow can no longer open MNIST

Date: 2019-03-13 10:48:32

Tags: python tensorflow

I'm getting back to work with TensorFlow. I'm re-running some code I wrote a few years ago, but it no longer works.

Old version

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

n_hidden_layer = 256 # layer number of features

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])

x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\
    biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])

# Define loss and optimizer
cost = tf.reduce_mean(\
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

As I understand it, the problem comes from read_data_sets, and I should use tf.data instead. The problem with tf.data is that I can no longer use:

mnist.train.num_examples
mnist.train.next_batch

And the data is not one-hot encoded.
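As far as I can tell, the rough tf.data equivalents would look something like this (a sketch based on my reading of the docs; the variable names and depth=10 are just illustrative):

import tensorflow_datasets as tfds
import tensorflow as tf

mnist_data, info = tfds.load("mnist", with_info=True, as_supervised=True)
mnist_train = mnist_data["train"]

# stand-in for mnist.train.num_examples
num_train = info.splits["train"].num_examples

# stand-in for the missing one-hot encoding
mnist_train = mnist_train.map(lambda img, lbl: (img, tf.one_hot(lbl, depth=10)))

# stand-in for mnist.train.next_batch(batch_size)
mnist_train = mnist_train.batch(128)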

I tried something like this:

import tensorflow_datasets as tfds
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Mandatory: to launch 
#tf.enable_eager_execution() 

mnist_data, info = tfds.load("mnist", with_info=True, as_supervised=True)
mnist_train, mnist_test = mnist_data["train"], mnist_data["test"]

Then, using mnist_train.batch instead of mnist.train.next_batch:

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(info.splits["train"].num_examples/batch_size)
        print(total_batch)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist_train.batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

I get the error:

RuntimeError: dataset.__iter__() is only supported when eager execution is enabled.

But if I do:

tf.enable_eager_execution() 

then I can no longer use:

tf.placeholder()
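From what I have read, the graph-mode alternative is to build iterator ops and run them in the session instead of iterating the Dataset in Python. A minimal sketch of that standard TF 1.x pattern (not something I have working yet):

# Graph-mode iteration: eager execution stays off, so tf.placeholder
# elsewhere in the graph keeps working.
iterator = mnist_train.batch(128).make_one_shot_iterator()
next_batch = iterator.get_next()  # a pair of tensors: (images, labels)

with tf.Session() as sess:
    batch_x, batch_y = sess.run(next_batch)  # concrete NumPy arrays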

New version

import tensorflow_datasets as tfds
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Mandatory: to launch 
#tf.enable_eager_execution() 

mnist_data, info = tfds.load("mnist", with_info=True, as_supervised=True)
mnist_train, mnist_test = mnist_data["train"], mnist_data["test"]

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

n_hidden_layer = 256 # layer number of features

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])

x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\
    biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])

# Define loss and optimizer
cost = tf.reduce_mean(\
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(info.splits["train"].num_examples/batch_size)
        print(total_batch)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist_train.batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

1 Answer:

Answer 0 (score: 0)

When you load data with tfds.load, you get an instance of tf.data.Dataset. You cannot feed it to feed_dict directly; instead you have to make an iterator and, at each step, feed the values obtained from the iterator into your feedable inputs. Roughly, you can do the following:

# one hot encode for 10 MNIST classes
def my_one_hot(feature, label):
    return feature, tf.one_hot(label, depth=10)

# load your data from tfds
mnist_train, train_info = tfds.load(name="mnist", with_info=True, as_supervised=True, split=tfds.Split.TRAIN)

# convert your labels in one-hot
mnist_train = mnist_train.map(my_one_hot)

# you can batch your data here
mnist_train = mnist_train.batch(8)
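The batch size of 8 above is just for illustration; to stay consistent with the training loop below, you could batch with batch_size and shuffle each epoch instead. A variant sketch (the shuffle buffer of 1024 is an arbitrary choice):

# variant: batch with the loop's batch_size and shuffle the examples
# (shuffle buffer of 1024 is an arbitrary illustrative choice)
mnist_train = mnist_train.shuffle(1024).batch(batch_size)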

Then you can launch the graph (and initialize the iterator):

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # make an iterator
    train_iterator = mnist_train.make_initializable_iterator()
    next_element = train_iterator.get_next()

    # Training cycle
    for epoch in range(training_epochs):

        # restart the iterator at the start of each epoch
        sess.run(train_iterator.initializer)

        total_batch = int(train_info.splits["train"].num_examples/batch_size)
        print(total_batch)
        # Loop over all batches
        for i in range(total_batch):
            # fetch a fresh batch from the iterator at every step
            batch_train_x, batch_train_y = sess.run(next_element)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_train_x, y: batch_train_y})
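One caveat: if the loop ever requests more batches than the iterator holds, get_next raises tf.errors.OutOfRangeError. A common guard (a sketch, adapt as needed) is to wrap the fetch:

# stop the epoch cleanly when the dataset is exhausted
try:
    batch_train_x, batch_train_y = sess.run(next_element)
except tf.errors.OutOfRangeError:
    break  # no more data in this epoch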

For more information about tf.data.Dataset, take a look at the guide. Hope it helps!