I am getting this error:
ValueError: Tensor conversion requested dtype string for Tensor with dtype bool:
<tf.Tensor 'lr/write_summary/Const:0' shape=() dtype=bool>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/user/iibi/skuanar/Downloads/VAE-GAN-Autoencoding-Beyond-Pixels-Using-a-Similarity-Metric-master/train_vae_gan.py", line 30, in <module>
    training = train_stuff(gan)
  File "/nfs/s-iibi54/users/skuanar/Downloads/VAE-GAN-Autoencoding-Beyond-Pixels-Using-a-Similarity-Metric-master/vaegan.py", line 245, in train_stuff
    summ_op = tf.compat.v1.summary.merge(summaries)
  File "/user/iibi/skuanar/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/summary/summary.py", line 371, in merge
    val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
  File "/user/iibi/skuanar/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_logging_ops.py", line 530, in merge_summary
    "MergeSummary", inputs=inputs, name=name)
  File "/user/iibi/skuanar/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/framework/op_def_library.py", line 493, in _apply_op_helper
    (prefix, dtype.name))
TypeError: Tensors in list passed to 'inputs' of 'MergeSummary' Op have types [bool, bool, bool, bool, bool, bool, bool, bool, bool] that do not match expected type string.
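If it helps, here is a minimal check that I believe reproduces the dtype mismatch the traceback is complaining about (this assumes TF 2.x; the tags "lr_v1" and "lr_v2" are just placeholders I made up for the check):

# Minimal sketch (assuming TF 2.x, graph mode via compat.v1): compare the dtypes
# returned by the two scalar-summary APIs.
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# v1 summary op: returns a serialized Summary protobuf tensor (dtype string)
v1_summ = tf.compat.v1.summary.scalar("lr_v1", tf.constant(0.001))

# 2.x summary op: with no default summary writer it returns a constant bool,
# which looks like the 'lr/write_summary/Const:0' bool tensor in the error
v2_summ = tf.summary.scalar("lr_v2", 0.001, step=0)

print(v1_summ.dtype)  # <dtype: 'string'>
print(v2_summ.dtype)  # <dtype: 'bool'>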
My code:
def train_stuff(model):
    # global_step = tf.train.create_global_step()
    global_step = tf.compat.v1.train.create_global_step()
    # learning_rate = tf.train.exponential_decay(config.learning_rate, global_step=global_step, decay_steps=10000, decay_rate=0.98)
    learning_rate = tf.compat.v1.train.exponential_decay(config.learning_rate, global_step=global_step, decay_steps=10000, decay_rate=0.98)
    train_vars = tf.compat.v1.trainable_variables()
    dec_vars = [var for var in train_vars if "decoder" in var.name]
    dis_vars = [var for var in train_vars if "discriminator" in var.name]
    enc_vars = [var for var in train_vars if "encoder" in var.name]
    dec_opt = tf.compat.v1.train.RMSPropOptimizer(learning_rate=learning_rate)
    dis_opt = tf.compat.v1.train.RMSPropOptimizer(learning_rate=learning_rate)
    enc_opt = tf.compat.v1.train.RMSPropOptimizer(learning_rate=learning_rate)
    dec_grad = dec_opt.compute_gradients(model["dec_loss"], var_list=dec_vars)
    dis_grad = dis_opt.compute_gradients(model["dis_loss"], var_list=dis_vars)
    enc_grad = enc_opt.compute_gradients(model["enc_loss"], var_list=enc_vars)
    dec_op = dec_opt.apply_gradients(dec_grad)
    dis_op = dis_opt.apply_gradients(dis_grad)
    enc_op = enc_opt.apply_gradients(enc_grad, global_step=global_step)
    summaries = [tf.summary.scalar("lr", learning_rate)] + model["summaries"]
    summ_op = tf.compat.v1.summary.merge(summaries)
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver()
    stuff = {"global_step": global_step, "dec_op": dec_op, "dis_op": dis_op, "summ_op": summ_op, "init": init, "saver": saver, "enc_op": enc_op}
    return stuff
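My suspicion (not verified) is that the problem comes from mixing the 2.x tf.summary.scalar with tf.compat.v1.summary.merge. A sketch of how I think the summary lines would look if kept entirely on the v1 API:

# My guess, not a confirmed fix: keep the scalar summary on the compat.v1 API
# so that everything passed to merge() is a serialized-summary string tensor.
summaries = [tf.compat.v1.summary.scalar("lr", learning_rate)] + model["summaries"]
summ_op = tf.compat.v1.summary.merge(summaries)

Since the error lists nine bool tensors, I assume the summaries collected in model["summaries"] would need the same change. Is this the right direction, or am I misreading the error?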