I am trying to build this LSTM network and I keep running into this error. I googled it but I'm still not sure what is going on. I tried adding this: with tf.variable_scope("cell1"). It still doesn't work. Any help is greatly appreciated.
Here is the code for the main LSTM structure:
def lstm_structure(self):
    train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = self.data_preprocessing(
        'SSE_Composite_Index.csv')
    graph = tf.Graph()
    with graph.as_default():
        '''Placeholders'''
        X = tf.placeholder(tf.float32, shape=[None, self.num_steps, self.input_size])
        y = tf.placeholder(tf.float32, shape=[None, self.num_classes])
        valid = tf.constant(valid_dataset)
        test = tf.constant(test_dataset)
        '''Weights'''
        weights = {'in': tf.Variable(tf.random_normal([self.input_size, self.num_neurons])),
                   'out': tf.Variable(tf.random_normal([self.num_neurons, self.num_classes]))}
        '''Biases'''
        biases = {'in': tf.Variable(tf.zeros(shape=[self.num_neurons, ])),
                  'out': tf.Variable(tf.zeros(shape=[self.num_classes, ]))}

        def lstm(X, weights, biases, reuse=True):
            '''from input to cell'''
            with tf.variable_scope("foo") as f:
                if reuse:
                    f.reuse_variables()
                X = tf.reshape(X, shape=[-1, self.input_size])
                X_in = tf.matmul(X, weights['in']) + biases['in']
                X_in = tf.reshape(X_in, shape=[-1, self.num_steps, self.num_neurons])
                '''cell'''
                cell_ = tf.contrib.rnn.BasicLSTMCell(self.num_neurons, forget_bias=1, state_is_tuple=True)
                _init_state = cell_.zero_state(self.batch_size, dtype=tf.float32)
                outputs, states = tf.nn.dynamic_rnn(cell_, X_in, initial_state=_init_state,
                                                    time_major=False, dtype=tf.float32)
                '''from cell to output'''
                results = tf.matmul(states[1], weights['out']) + biases['out']
                return results

        logits = lstm(X, weights, biases, False)
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss)
        train_prediction = tf.nn.softmax(logits=logits)
        valid_prediction = tf.nn.softmax(lstm(valid, weights, biases, True))
        test_prediction = tf.nn.softmax(lstm(test, weights, biases, True))
    with tf.Session(graph=graph) as session:
        tf.initialize_all_variables().run()
        print('Initialised')
        loss_list = []
        for step in range(self.num_epochs):
            if step == 0:
                i = 0
                while i < len(train_dataset):
                    start = i
                    end = i + self.batch_size
                    batch_data = train_dataset[start:end, :]
                    batch_label = train_labels[start:end, :]
                    i += self.batch_size
                    a, b, prediction = session.run([optimizer, loss, train_prediction],
                                                   feed_dict={X: batch_data, y: batch_label})
                    loss_list.append(b)
            else:
                i = 0
                while i < len(train_dataset):
                    start = i
                    end = i + self.batch_size
                    batch_data = train_dataset[start:end, :]
                    batch_label = train_labels[start:end, :]
                    i += self.batch_size
                    a, b, prediction = session.run([optimizer, loss, train_prediction],
                                                   feed_dict={X: batch_data, y: batch_label})
                    loss_list.append(b)
            if step % 10 == 0:
                print('Step', step, 'Loss', b)
                print('Training Accuracy', self.accuracy(prediction, batch_label), '%')
                print('Validation Accuracy',
                      self.accuracy(valid_prediction.eval(), valid_labels), '%')
                print('test Accuracy', self.accuracy(test_prediction.eval(), test_labels), '%')
        print('Finished')
The error message I get is:
Traceback (most recent call last):
  File "C:/Users/LiXin/PycharmProjects/PythonProjects/LSTM_CLASSIFIER.py", line 114, in <module>
    lstm.LSTM_structure()
  File "C:/Users/LiXin/PycharmProjects/PythonProjects/LSTM_CLASSIFIER.py", line 87, in LSTM_structure
    valid_prediction=tf.nn.softmax(LSTM(valid,weights,biases))
  File "C:/Users/LiXin/PycharmProjects/PythonProjects/LSTM_CLASSIFIER.py", line 75, in LSTM
    outputs,states=tf.nn.dynamic_rnn(cell_,X_in,initial_state=_init_state,time_major=False,dtype=tf.float32)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn.py", line 614, in dynamic_rnn
    dtype=dtype)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn.py", line 777, in _dynamic_rnn_loop
    swap_memory=swap_memory)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2816, in while_loop
    result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2640, in BuildLoop
    pred, body, original_loop_vars, loop_vars, shape_invariants)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2590, in _BuildLoop
    body_result = body(*packed_vars_for_body)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn.py", line 762, in _time_step
    (output, new_state) = call_cell()
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn.py", line 748, in <lambda>
    call_cell = lambda: cell(input_t, state)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 183, in __call__
    return super(RNNCell, self).__call__(inputs, state)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\layers\base.py", line 575, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 438, in call
    self._linear = _Linear([inputs, h], 4 * self._num_units, True)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 1171, in __init__
    initializer=kernel_initializer)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1203, in get_variable
    constraint=constraint)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1092, in get_variable
    constraint=constraint)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 417, in get_variable
    return custom_getter(**custom_getter_kwargs)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 186, in _rnn_get_variable
    variable = getter(*args, **kwargs)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 394, in _true_getter
    use_resource=use_resource, constraint=constraint)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 742, in _get_single_variable
    name, "".join(traceback.format_list(tb))))
ValueError: Variable rnn/basic_lstm_cell/kernel already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\framework\ops.py", line 1470, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\framework\ops.py", line 2956, in create_op
    op_def=op_def)
  File "C:\Users\LiXin\PycharmProjects\PythonProjects\venv\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
However, the following code works:
def RNN_neural_network_model(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size, state_is_tuple=True)
    # stacked_lstm = tf.contrib.rnn.MultiRNNCell(
    #     [lstmcell(rnn_size) for _ in range(num_layers)])
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']
    return output
Answer 0 (score: 0)
You should wrap the LSTM definition inside a variable scope and then reuse that scope for validation and testing. Try the following:
def LSTM(X, weights, biases, reuse=False):
    '''from input to cell'''
    with tf.variable_scope("foo") as f:
        if reuse:
            f.reuse_variables()
        X = tf.reshape(X, shape=[-1, self.input_size])
        X_in = tf.matmul(X, weights['in']) + biases['in']
        X_in = tf.reshape(X_in, shape=[-1, self.num_steps, self.num_neurons])
        '''cell'''
        cell_ = tf.contrib.rnn.BasicLSTMCell(self.num_neurons, forget_bias=1, state_is_tuple=True)
        _init_state = cell_.zero_state(self.batch_size, dtype=tf.float32)
        outputs, states = tf.nn.dynamic_rnn(cell_, X_in, initial_state=_init_state,
                                            time_major=False, dtype=tf.float32)
        '''from cell to output'''
        results = tf.matmul(states[1], weights['out']) + biases['out']
        return results
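For context on why the original code failed: BasicLSTMCell creates its weights through tf.get_variable, so the first call to LSTM creates rnn/basic_lstm_cell/kernel, and the second call tries to create the same variable again in the same graph, which raises the ValueError above unless the enclosing scope is marked for reuse. Here is a minimal standalone sketch of that mechanic (the scope name "demo" and variable "w" are made up for illustration):

import tensorflow as tf

def make_weight(reuse):
    # First call creates "demo/w"; with reuse=True, later calls
    # return the existing variable instead of raising ValueError.
    with tf.variable_scope("demo") as scope:
        if reuse:
            scope.reuse_variables()
        return tf.get_variable("w", shape=[2, 2])

w1 = make_weight(reuse=False)  # creates demo/w
w2 = make_weight(reuse=True)   # fetches the same demo/w
assert w1 is w2                # both names point at one variable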
Change the training and testing code as follows:
logits = LSTM(X, weights, biases, False)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss)
train_prediction = tf.nn.softmax(logits=logits)
valid_prediction = tf.nn.softmax(LSTM(valid, weights, biases, True))
test_prediction = tf.nn.softmax(LSTM(test, weights, biases, True))
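As the error message itself suggests, reuse=tf.AUTO_REUSE (available in TensorFlow 1.4+) is an alternative to calling reuse_variables() by hand: the scope creates its variables on the first call and silently reuses them on every later call, so the explicit reuse flag disappears. A sketch of that variant, assuming the same class attributes as the code above:

def LSTM(X, weights, biases):
    # AUTO_REUSE: create the cell's variables on the first call,
    # reuse them on all subsequent calls.
    with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
        X = tf.reshape(X, shape=[-1, self.input_size])
        X_in = tf.matmul(X, weights['in']) + biases['in']
        X_in = tf.reshape(X_in, shape=[-1, self.num_steps, self.num_neurons])
        cell_ = tf.contrib.rnn.BasicLSTMCell(self.num_neurons, forget_bias=1, state_is_tuple=True)
        _init_state = cell_.zero_state(self.batch_size, dtype=tf.float32)
        outputs, states = tf.nn.dynamic_rnn(cell_, X_in, initial_state=_init_state, time_major=False)
        return tf.matmul(states[1], weights['out']) + biases['out']

One caveat either way: zero_state is built with self.batch_size, so the valid and test tensors must have that same leading dimension; if they don't, drop initial_state and pass dtype=tf.float32 so dynamic_rnn builds a zero state matching the actual batch.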