我正在尝试在 tensorflow-1.0.0 中实现一个简单的 RNN。第一次加载脚本可以正常运行,但之后每次重新加载脚本时,我都会收到此错误:
ValueError: Variable rnn/rnn/basic_lstm_cell/weights already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:
我的脚本是:
from __future__ import print_function
import os
import nltk
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import app
from prelude import *
from utils import *
# Clear the terminal before any output (cosmetic; assumes a POSIX shell).
os.system('clear')
'''
Rough idea, determine Pr[ rating | h_T]
- experiment with:
* one hot encoding
* pretrained word vectors
'''
############################################################
'''
Load data
'''
# Paths are resolved relative to the current working directory, so the
# script must be launched from the project root for these to exist.
root = os.getcwd()
data_dir = os.path.join(root, 'data/aclImdb/')
out_dir = os.path.join(root, 'tutorials/imdb/output/')
'''
Settings
'''
# Preprocessing settings consumed by Imdb: special tokens, vocabulary cap,
# and review-length bounds (in words). 'max-length' also fixes the number
# of RNN timesteps below.
SETTING = {'UNK' : '<unk>'
,'PAD' : '_'
,'End-of-Paragraph': '<EOP>'
,'VOCAB_SIZE' : 6000
,'min-length' : 5
,'max-length' : 25}
# NOTE(review): Imdb is pulled in by the star imports (prelude/utils/app) —
# not visible in this file; presumably it tokenizes and encodes the aclImdb
# corpus using SETTING. TODO confirm against its definition.
imdb = Imdb(SETTING, data_dir, out_dir)
############################################################
'''
RNN
training parameters
'''
learn_rate = 0.001      # optimizer step size (optimizer not defined in this chunk)
train_iters = 100000    # total number of training iterations
batch_size = 128        # reviews per mini-batch
display_step = 10       # presumably: log progress every N batches — confirm at the training loop
'''
network parameters
'''
n_input = SETTING['VOCAB_SIZE'] # one hot vector for each word
n_steps = SETTING['max-length'] # maximum of 25 words per review
n_hidden = 128                  # LSTM hidden-state size
n_classes = 2                   # two output classes (per the Pr[rating | h_T] note above)
'''
graph input
'''
# NOTE(review): X is laid out [batch, vocab, time] — the one-hot word axis
# comes BEFORE the time axis. Anything slicing X into per-timestep inputs
# must account for this ordering.
X = tf.placeholder(tf.float32, [None, n_input, n_steps])
Y = tf.placeholder(tf.float32, [None, n_classes] )
'''
network parameters
'''
# Output-layer parameters: project the final hidden state onto class scores.
theta = {
'W': tf.Variable(tf.random_normal([n_hidden, n_classes]))
,'b': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(X, theta):
    '''
    @Use: given input X and parameters theta, output the unnormalized
          class scores (logits) for each review.

    X     : float32 tensor [batch, n_input, n_steps] — one-hot word
            vectors with the vocabulary axis before the time axis.
    theta : dict with 'W' [n_hidden, n_classes] and 'b' [n_classes].

    Returns a [batch, n_classes] logits tensor computed from the LSTM
    output at the final timestep.
    '''
    # rnn.static_rnn expects a length-n_steps list of [batch, n_input]
    # tensors. X arrives batch-major as [batch, n_input, n_steps], so make
    # it time-major first. BUG FIX: without this transpose, the reshape and
    # split below run over the raw [batch, n_input, n_steps] layout and
    # interleave the word and time axes, feeding meaningless slices to the
    # LSTM (the original comment stated the required time-major layout but
    # never performed the conversion).
    X = tf.transpose(X, [2, 0, 1])          # -> [n_steps, batch, n_input]
    X = tf.reshape(X, [-1, n_input])        # -> [n_steps * batch, n_input]
    X = tf.split(X, n_steps, 0)             # -> n_steps x [batch, n_input]
    # NOTE(review): re-running this script in the same interpreter rebuilds
    # the LSTM variables inside the same default graph, which raises
    # "ValueError: Variable rnn/.../weights already exists". The scope below
    # namespaces the variables but does not fix that; call
    # tf.reset_default_graph() at the top of the script (before creating any
    # placeholders or Variables) when reloading interactively.
    with tf.variable_scope('lstm'):
        # one LSTM cell, statically unrolled for n_steps timesteps
        lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
        outputs, states = rnn.static_rnn(lstm_cell, X, dtype=tf.float32)
    # project the last timestep's hidden output onto the class logits
    yhat = tf.matmul(outputs[-1], theta['W']) + theta['b']
    return yhat

Yhat = RNN(X, theta)
我知道它与变量范围有关,但我不确定如何调整变量的范围,以便每次重新加载脚本时都不会抛出错误?
答案 0（得分：1）
这是整个脚本吗？你能尝试在 LSTM 单元周围设置变量作用域吗？
with tf.variable_scope('lstm'):
outputs, states = rnn.static_rnn(lstm_cell, X, dtype=tf.float32)
关于变量作用域的更多信息,请参见 here。