I am trying to run code to train a neural network with TensorFlow and Python 3.7, but I get the error below. I am new to TensorFlow; can someone give me a hint on how to fix it?
This is my error: UnboundLocalError: local variable 'train_diretorio' referenced before assignment
Here is the relevant part of the code. I am new to Python and TensorFlow, so any help is appreciated.
# ... imports ...

def train():
    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = input_file_pattern
    model_config.inception_checkpoint_file = inception_checkpoint_file
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_diretorio = train_diretorio
    if not tf.gfile.IsDirectory(train_diretorio):
        tf.logging.info("Creating training directory: %s", train_diretorio)
        tf.gfile.MakeDirs(train_diretorio)

    # Build the TensorFlow graph.
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = show_and_tell_model.ShowAndTellModel(
            model_config, mode="train", train_inception=train_inception)
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if train_inception:
            learning_rate = tf.constant(training_config.train_inception_learning_rate)
        else:
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (training_config.num_examples_per_epoch /
                                         model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)

    # Run training.
    tf.contrib.slim.learning.train(
        train_op,
        train_diretorio,
        log_every_n_steps=log_every_n_steps,
        graph=g,
        global_step=model.global_step,
        number_of_steps=number_of_steps,
        init_fn=model.init_fn,
        saver=saver)

input_file_pattern = 'im2txt/data/mscoco/train-?????-of-00256'
inception_checkpoint_file = 'im2txt/data/inception_v3.ckpt'
train_diretorio = 'im2txt/model'
train_inception = False
number_of_steps = 1000000
log_every_n_steps = 1
train()
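
From what I have read, I think the problem might be the line `train_diretorio = train_diretorio`: because the name is assigned somewhere inside the function, Python seems to treat it as a local variable for the whole function, so the right-hand side is read before it ever gets a value. A tiny snippet like this (the name `value` is just for illustration, it is not from my real code) gives me the same error:

    value = 'im2txt/model'

    def use_value():
        # Assigning to `value` here makes it a local name for the whole function,
        # so reading it on the right-hand side fails before the assignment happens.
        value = value
        print(value)

    use_value()  # UnboundLocalError: local variable 'value' referenced before assignment

Is that really what is happening in my code?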
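
Would removing that line and passing the settings into train() as arguments be the right direction? Something like this sketch (the parameter names are just my guess at how to restructure it):

    def train(input_file_pattern, inception_checkpoint_file, train_diretorio,
              train_inception, number_of_steps, log_every_n_steps):
        ...  # same body as above, minus the `train_diretorio = train_diretorio` line

    train(input_file_pattern='im2txt/data/mscoco/train-?????-of-00256',
          inception_checkpoint_file='im2txt/data/inception_v3.ckpt',
          train_diretorio='im2txt/model',
          train_inception=False,
          number_of_steps=1000000,
          log_every_n_steps=1)

Or is there a simpler way to keep using the module-level variables?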