TensorFlow: ValueError: Can't load save_path when it is None

Asked: 2018-03-29 17:09:56

Tags: python-3.x tensorflow

import os
import tarfile
from six.moves import urllib

URL = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
PATH = 'aclImdb'

def fetch_data(url = URL, path = PATH):
    # only download and extract if the dataset directory is not already there
    if not os.path.isdir(path):
        os.makedirs(path)
        file_path = os.path.join(path, "aclImdb_v1.tar.gz")
        urllib.request.urlretrieve(url, file_path)
        file_gz = tarfile.open(file_path)
        file_gz.extractall(path = path)
        file_gz.close()
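
The helper is presumably called once before the parsing loop below; a minimal usage sketch (the call itself does not appear in the original post):

fetch_data() # download and extract the 50,000 IMDb reviews into aclImdb/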

import pyprind # for progress visualisation
import pandas as pd
PATH = 'aclImdb'

labels = {'pos': 1, 'neg': 0} # int class labels for 'positive' and 'negative'
pbar = pyprind.ProgBar(50000) # initialise a progress bar with 50k iterations = no. of docs

df = pd.DataFrame()

# use nested for loops to iterate over the 'train' & 'test' subdirs
for s in ('test', 'train'):
    for l in ('pos', 'neg'): # and read text files from the 'pos' and 'neg' subdirs
        path = os.path.join(PATH, s, l)
        for file in os.listdir(path):
            # append to the df pandas DataFrame with an int class label (pos = 1, neg = 0)
            with open(os.path.join(path, file), 'r', encoding = 'utf-8') as infile:
                txt = infile.read()
                df = df.append([[txt, labels[l]]], ignore_index = True)
                pbar.update()

df.columns = ['review', 'sentiment']

import numpy as np

np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.to_csv('movie_data.csv', index = False, encoding = 'utf-8')

df = pd.read_csv('movie_data.csv', encoding = 'utf-8')
df.head(3)


# Separate words and count each word's occurrence
import pyprind # for progress visualisation
from collections import Counter
from string import punctuation
import re

counts = Counter() # collects the counts of occurrence of each unique word

pbar = pyprind.ProgBar(len(df['review']), 
                       title = 'Counting word occurrences...') # progress bar

for i, review in enumerate(df['review']):
    text = ''.join([c if c not in punctuation else ' '+c+' '
                    for c in review]).lower()
    df.loc[i, 'review'] = text
    pbar.update()
    counts.update(text.split())


# Mapping each unique word to an int

word_counts = sorted(counts, key = counts.get, reverse = True)
print(word_counts[:5])
word_to_int = {word: ii for ii, word in enumerate(word_counts, 1)}
n_words = max(list(word_to_int.values())) + 1

mapped_reviews = []

pbar = pyprind.ProgBar(len(df['review']),
                       title = 'Map movie reviews to integers...')

for review in df['review']:
    mapped_reviews.append([word_to_int[word] for word in review.split()])
    pbar.update()

# Left-pad with zeros if the sequence length < 200
# Use 200 elements if the length > 200

sequence_length = 200
sequences = np.zeros((len(mapped_reviews), sequence_length), dtype = int)

for i, row in enumerate(mapped_reviews):
    review_arr = np.array(row)
    sequences[i, -len(row):] = review_arr[-sequence_length:]

# Split the dataset into training and test sets

X_train = sequences[:25000, :]
y_train = df['sentiment'].values[:25000] # slice positionally; df.loc[:25000] would also include row 25000

X_test = sequences[25000:, :]
y_test = df['sentiment'].values[25000:]

# Define the mini-batches generator

np.random.seed(123)

def batch_gen(x, y = None, batch_size = 64):
    n_batches = len(x) // batch_size
    x = x[:n_batches * batch_size]

    if y is not None:
        y = y[:n_batches * batch_size]
    for ii in range(0, len(x), batch_size):
        if y is not None:
            yield x[ii : ii + batch_size], y[ii : ii + batch_size]
        else:
            yield x[ii : ii + batch_size]
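
As a quick sanity check of the generator (a sketch: the expected shapes follow from batch_size = 64 and sequence_length = 200 defined above):

x_batch, y_batch = next(batch_gen(X_train, y_train, batch_size = 64))
print(x_batch.shape, y_batch.shape) # expected: (64, 200) (64,)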

import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' ## suppress TensorFlow INFO and WARNING log messages

class SentimentRNN(object):

    # Define __init__
    def __init__(self, 
                 n_words, 
                 seq_len = 200, 
                 lstm_size = 256, 
                 num_layers = 1, 
                 batch_size = 64,
                 learning_rate = 0.0001,
                 embed_size = 200):
        self.n_words = n_words
        self.seq_len = seq_len
        self.lstm_size = lstm_size # no. of hidden units
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.embed_size = embed_size

        self.g = tf.Graph()
        with self.g.as_default():
            tf.set_random_seed(123)
            self.build()
            self.saver = tf.train.Saver()
            self.init_op = tf.global_variables_initializer()


    # Define the build method
    def build(self):

        # Define the placeholders
        tf_x = tf.placeholder(tf.int32,
                              shape = (self.batch_size, self.seq_len),
                              name = 'tf_x')
        tf_y = tf.placeholder(tf.float32,
                              shape = (self.batch_size),
                              name = 'tf_y')
        tf_keepprob = tf.placeholder(tf.float32,
                                    name = 'tf_keepprob')



        # Create the embedding layer
        embedding = tf.Variable(
            tf.random_uniform(
                shape = (self.n_words, self.embed_size),
                minval = -1,
                maxval = 1),
            name = 'embedding')
        embed_x = tf.nn.embedding_lookup(embedding, 
                                         tf_x,
                                         name = 'embed_x')



        # Define LSTM cells and stack them
        cells = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.DropoutWrapper(
                tf.contrib.rnn.BasicLSTMCell(num_units = self.lstm_size),
                output_keep_prob = tf_keepprob)
            for i in range(self.num_layers)])



        # Define the initial state:
        self.initial_state = cells.zero_state(
            self.batch_size, tf.float32)
        print('  << initial state >> ', self.initial_state)



        # Put together components with tf.nn.dynamic_rnn
        lstm_outputs, self.final_state = tf.nn.dynamic_rnn(
            cell = cells, 
            inputs = embed_x,
            initial_state = self.initial_state)
        ## lstm_outputs shape: [batch_size, max_time, cells.output_size]
        print('\n  << lstm_output   >> ', lstm_outputs)
        print('\n  << final state   >> ', self.final_state)



        # Apply a fully connected layer to the RNN output
        logits = tf.layers.dense(
            inputs = lstm_outputs[:, -1],
            units = 1, # dimensionality of the output space
            activation = None,
            name = 'logits')

        # Remove dimensions of size 1 from the tensor shape
        logits = tf.squeeze(input = logits, 
                            name = 'logits_squeezed')
        print('\n  << logits        >> ', logits)


        # If you want probabilities rather than hard labels
        y_proba = tf.nn.sigmoid(logits, name = 'probabilities')
        predictions = {'probabilities' : y_proba, 
                       'labels' : tf.cast(tf.round(y_proba), 
                                         tf.int32,
                                         name = 'labels')}
        print('\n  << predictions   >> ', predictions)



        # Define the cost function
        cost = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels = tf_y, 
                logits = logits), 
            name = 'cost')



        # Define the optimiser
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        train_op = optimizer.minimize(cost, name = 'train_op')



    # Define the train method
    def train(self, X_train, y_train, num_epochs):

        with tf.Session(graph = self.g) as sess:
            sess.run(self.init_op)
            iteration = 1
            for epoch in range(num_epochs):
                state = sess.run(self.initial_state)

                for batch_x, batch_y in batch_gen(
                    X_train, 
                    y_train, 
                    batch_size = self.batch_size):

                    feed = {'tf_x:0' : batch_x,
                            'tf_y:0' : batch_y,
                            'tf_keepprob:0' : 0.5,
                            self.initial_state : state}

                    loss, _, state = sess.run(
                        ['cost:0',
                         'train_op',
                         self.final_state], 
                        feed_dict=feed)

                    if iteration % 20 == 0:
                        print("Epoch: %d/%d Iteration: %d "
                              "| Train loss: %.5f" % (
                                  epoch + 1, 
                                  num_epochs,
                                  iteration, 
                                  loss))

                    iteration += 1

                if (epoch + 1) % 10 == 0:
                    self.saver.save(
                        sess,
                        "model/sentiment-%d.ckpt" % epoch)



    # Define the predict method
    def predict(self, X_data, return_proba=False):

        preds = []

        with tf.Session(graph = self.g) as sess:
            self.saver.restore(
                sess, 
                tf.train.latest_checkpoint('model/'))
            test_state = sess.run(self.initial_state)

            for ii, batch_x in enumerate(batch_gen(
                x = X_data,
                y = None,
                batch_size = self.batch_size), 1):

                feed = {'tf_x:0' : batch_x,
                        'tf_keepprob:0' : 1.0,
                        self.initial_state : test_state}
                if return_proba:
                    pred, test_state = sess.run(
                        ['probabilities:0', self.final_state],
                        feed_dict=feed)
                else:
                    pred, test_state = sess.run(
                        ['labels:0', self.final_state],
                        feed_dict=feed)

                preds.append(pred)

        return np.concatenate(preds)



rnn = SentimentRNN(n_words = n_words,
                   seq_len = sequence_length,
                   embed_size = 256,
                   lstm_size = 128,
                   num_layers = 1,
                   batch_size = 100,
                   learning_rate = 0.001)
preds = rnn.predict(X_test)
y_true = y_test[:len(preds)]
print('Test accuracy... %.3f' % (np.sum(preds == y_true) / len(y_true)))

An object of the SentimentRNN class is created with the following parameters:

n_words = n_words, seq_len = sequence_length, embed_size = 256, lstm_size = 128, num_layers = 1, batch_size = 100, learning_rate = 0.001.

Since we have a relatively small dataset, num_layers = 1 might generalize better.


ValueError                                Traceback (most recent call last)
<ipython-input-23-a3cfe03a9a49> in <module>()
----> 1 preds = rnn.predict(X_test)
      2 y_true = y_test[:len(preds)]
      3 print('Test accuracy... %.3f' % (np.sum(preds == y_true) / len(y_true)))

<ipython-input-12-d83ee67c43b6> in predict(self, X_data, return_proba)
    173             self.saver.restore(
    174                 sess,
--> 175                 tf.train.latest_checkpoint('model/'))
    176             test_state = sess.run(self.initial_state)
    177 

/usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/training/saver.py in restore(self, sess, save_path)
   1680       return
   1681     if save_path is None:
-> 1682       raise ValueError("Can't load save_path when it is None.")
   1683     logging.info("Restoring parameters from %s", save_path)
   1684     if context.in_graph_mode():

ValueError: Can't load save_path when it is None.

1 Answer:

Answer 0 (score: 0):

The error just means that tf.train.latest_checkpoint didn't find anything. It returns None, and the Saver then complains because it was passed None. So there is no checkpoint in that directory.
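
Looking at the posted code, rnn.predict(X_test) is called without a prior rnn.train(...) call, and even within train the checkpoint is only written when (epoch + 1) % 10 == 0, so a run of fewer than 10 epochs also leaves model/ empty. A minimal sketch of the fix (num_epochs = 40 is an assumption; any multiple of 10 triggers a save):

import os
import tensorflow as tf

os.makedirs('model', exist_ok = True) # saver.save does not create the directory itself

# train first so that model/sentiment-*.ckpt files exist
# (in the train method above, saver.save only fires every 10 epochs)
rnn.train(X_train, y_train, num_epochs = 40)

# guard against a missing checkpoint before predicting
if tf.train.latest_checkpoint('model/') is None:
    raise FileNotFoundError("no checkpoint in 'model/'; train the model first")

preds = rnn.predict(X_test)

Alternatively, moving the self.saver.save(...) call out of the if (epoch + 1) % 10 == 0 block writes a checkpoint after every epoch, so even a short training run leaves something for predict to restore.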