https://github.com/cmgreen210/TensorFlowDeepAutoencoder

I am trying to save and restore the model after the fine-tuning step. When I restore the model and then try to get the variables out of it, I get this error: ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope? If I set reuse to False, a new variable is created for each weight and bias instead.
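For context, here is a minimal sketch of the mechanism behind this error (the scope and variable names are illustrative, not from the repo): tf.variable_scope(..., reuse=True) only finds variables that an earlier tf.get_variable() call registered in the variable store of the current process; variables that merely exist in the graph, for example because they were imported from a meta graph, are not in that store.

import tensorflow as tf

with tf.variable_scope("demo", reuse=True):
    # Raises "ValueError: Variable demo/w does not exist, or was not created
    # with tf.get_variable()..." because no prior tf.get_variable("w") ever
    # ran inside the "demo" scope in this process.
    w = tf.get_variable("w", shape=(2, 2))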
Here is my code to restore the model:
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in range(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))

def evaluation(logits, labels):
  # 'correct' was undefined in the snippet as pasted; the upstream repo
  # computes it with tf.nn.in_top_k, so that line is restored here.
  correct = tf.nn.in_top_k(logits, labels, 1)
  return tf.reduce_sum(tf.cast(correct, tf.int32))
def test_nets(self):
  data = read_data_sets(FLAGS.data_dir)
  ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25")
  sess = tf.InteractiveSession()
  saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')
  saver.restore(sess, ckpt.model_checkpoint_path)
  with sess.as_default():
    ae_shape = [784, 2000, 2000, 2000, 10]
    ae = AutoEncoder(ae_shape, sess)
    input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size,
                                                 FLAGS.image_pixels),
                              name='input_pl')
    sup_net = ae.supervised_net(input_pl)
    data = read_data_sets(FLAGS.data_dir)
    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')
    eval_correct = evaluation(sup_net, labels_placeholder)
    do_eval(sess,
            eval_correct,
            input_pl,
            labels_placeholder,
            data.test)
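Note that import_meta_graph already recreates every variable of the saved model inside the restored graph, so building a second AutoEncoder on top of it is what collides with the scope. A sketch of an alternative (the tensor names are assumed from the error message and the repo's weights{i}/biases{i} naming, so treat them as illustrative): read the restored tensors straight out of the graph instead of calling tf.get_variable() again.

graph = tf.get_default_graph()
# Variables imported via import_meta_graph live in the graph and its
# collections, but not in the variable-scope store, so fetch them by
# their full tensor name instead:
w1 = graph.get_tensor_by_name("autoencoder_variables/weights1:0")
b1 = graph.get_tensor_by_name("autoencoder_variables/biases1:0")
print(sess.run(tf.shape(w1)))  # expected [784 2000] for the first layer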
Here is my code to train and save the supervised model:
def main_supervised(ae):
  with ae.session.graph.as_default():
    saver = tf.train.Saver()
    sess = ae.session
    input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size,
                                                 FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    data = read_data_sets(FLAGS.data_dir)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])
    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)

    summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph_def)
    # tf.train.SummaryWriter(pjoin(FLAGS.summary_dir, 'fine_tuning'),
    #                        graph_def=sess.graph_def,
    #                        flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    # sess.run(tf.initialize_variables(vars_to_init))
    init = tf.initialize_all_variables()
    sess.run(init)

    steps = num_train // FLAGS.batch_size
    for k in range(1):
      for step in range(1):
        start_time = time.time()

        feed_dict = fill_feed_dict(data.train,
                                   input_pl,
                                   labels_placeholder)

        _, loss_value = sess.run([train_op, loss],
                                 feed_dict=feed_dict)

        duration = time.time() - start_time

        # Write the summaries and print an overview fairly often.
        if step % 1 == 0:
          # Print status to stdout.
          print('Step %d/%d: loss = %.2f (%.3f sec)' %
                (step, steps, loss_value, duration))
          # Update the events file.
          summary_str = sess.run(summary_op, feed_dict=feed_dict)
          summary_writer.add_summary(summary_str, step)
          # summary_img_str = sess.run(
          #     tf.summary.image("training_images",
          #                      tf.reshape(input_pl,
          #                                 (FLAGS.batch_size,
          #                                  FLAGS.image_size,
          #                                  FLAGS.image_size, 1)),
          #                      max_outputs=10),
          #     feed_dict=feed_dict
          # )
          # summary_writer.add_summary(summary_img_str)

        if (step + 1) % 1000 == 0 or (step + 1) == steps:
          train_sum = do_eval_summary("training_error",
                                      sess,
                                      eval_correct,
                                      input_pl,
                                      labels_placeholder,
                                      data.train)
          val_sum = do_eval_summary("validation_error",
                                    sess,
                                    eval_correct,
                                    input_pl,
                                    labels_placeholder,
                                    data.validation)
          test_sum = do_eval_summary("test_error",
                                     sess,
                                     eval_correct,
                                     input_pl,
                                     labels_placeholder,
                                     data.test)
          summary_writer.add_summary(train_sum, step)
          summary_writer.add_summary(val_sum, step)
          summary_writer.add_summary(test_sum, step)

    folder = "model_sps_" + str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
    os.mkdir(folder)
    folder += "/model"
    saver.save(sess, folder)

    do_eval(sess,
            eval_correct,
            input_pl,
            labels_placeholder,
            data.test)
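For reference, saver.save(sess, folder) is what produces the files the test code consumes: a model.meta graph definition, the model.index/model.data-* value files, and a checkpoint index file in the same directory, which tf.train.get_checkpoint_state later reads. A minimal sketch of the round trip (the directory name is illustrative):

saver.save(sess, "model_sps_demo/model")  # writes model.meta, model.index, model.data-*, checkpoint
ckpt = tf.train.get_checkpoint_state("model_sps_demo")
print(ckpt.model_checkpoint_path)  # -> model_sps_demo/model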
Here is how I set up the variables and restore them one by one inside the autoencoder_variables scope:
def _restore_variables(self):
  # print(tf.get_collection(tf.GraphKeys.VARIABLES, scope='autoencoder_variables'))
  # v = tf.get_variable("autoencoder_variables/weights1", shape=(784, 2000))
  # print(v)
  with tf.variable_scope("autoencoder_variables", reuse=True) as scope1:
    # print(tf.get_collection(tf.GraphKeys.VARIABLES, scope="autoencoder_variables"))
    # print(scope)
    # tf.Variable 'autoencoder_variables/weights1:0' shape=(784, 2000)
    for i in range(self.__num_hidden_layers + 1):
      # Train weights
      name_w = self._weights_str.format(i + 1)
      w_shape = (self.__shape[i], self.__shape[i + 1])
      self[name_w] = tf.get_variable(name_w, w_shape, trainable=False)
      # Train biases
      name_b = self._biases_str.format(i + 1)
      b_shape = (self.__shape[i + 1],)
      self[name_b] = tf.get_variable(name_b, b_shape)

      if i < self.__num_hidden_layers:
        # Hidden layer fixed weights (after pretraining, before fine-tuning)
        self[name_w + "_fixed"] = tf.get_variable(name_w + "_fixed", w_shape)
        # Hidden layer fixed biases
        self[name_b + "_fixed"] = tf.get_variable(name_b + "_fixed", b_shape)
        # Pretraining output training biases
        name_b_out = self._biases_str.format(i + 1) + "_out"
        b_shape = (self.__shape[i],)
        self[name_b_out] = tf.get_variable(name_b_out, b_shape)
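This reuse=True lookup is what fails in the traceback below: in the evaluation process nothing has ever called tf.get_variable() inside autoencoder_variables, because import_meta_graph fills the graph, not the variable store. A common fix, sketched here under the assumption that the constructor can build its variables from scratch, is to create the variables with reuse off and then let a plain tf.train.Saver copy the checkpoint values onto them:

with tf.variable_scope("autoencoder_variables"):  # reuse off: create fresh variables
    w1 = tf.get_variable("weights1", (784, 2000))
sess = tf.Session()
saver = tf.train.Saver()  # matches variables to checkpoint entries by name
ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25")
saver.restore(sess, ckpt.model_checkpoint_path)  # no import_meta_graph needed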
Error traceback:
File "run.py", line 47, in <module>
main()
File "run.py", line 40, in main
test.test_nets_1()
File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder_test.py", line 55, in test_nets_1
ae = AutoEncoder(ae_shape, sess)
File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 41, in __init__
self._restore_variables()
File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 98, in _restore_variables
self[name_w] = tf.get_variable(name_w,w_shape)
File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1065, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 962, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 367, in get_variable
validate_shape=validate_shape, use_resource=use_resource)
File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 352, in _true_getter
use_resource=use_resource)
File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 682, in _get_single_variable
"VarScope?" % name)
ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
Answer 0 (score: 0)
Check which variables were left uninitialized by adding print(sess.run(tf.report_uninitialized_variables())) right after the restore line.

Also try running sess.run(tf.global_variables_initializer()) before the restore, and analyze the results.
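A sketch of that debugging recipe in context (the saver and ckpt objects are the ones from the question's test_nets):

sess.run(tf.global_variables_initializer())  # give every variable some value first
saver.restore(sess, ckpt.model_checkpoint_path)  # then overwrite from the checkpoint
# An empty array here means the restore left nothing uninitialized:
print(sess.run(tf.report_uninitialized_variables()))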