I am trying to code a CycleGAN with TensorFlow.
I am getting this error message:
OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
I made some changes to the code. My full traceback:
---------------------------------------------------------------------------
OperatorNotAllowedInGraphError Traceback (most recent call last)
<ipython-input-161-bde4fc92c3b9> in <module>
4 n = 0
5 for image_x, image_y in tf.data.Dataset.zip((train_horses, train_zebras)):
----> 6 train_step(image_x, image_y)
7 if n % 10 == 0:
8 print ('.', end='')
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
OperatorNotAllowedInGraphError: in user code:
<ipython-input-160-538af916a6fd>:28 train_step *
total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_losss(real_y, cycled_y)
<ipython-input-151-74a790ebcddf>:2 calc_cycle_loss *
loss1 = tf.reduce_mean(tf.abs(real_image, cycled_image))
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\util\dispatch.py:201 wrapper **
return target(*args, **kwargs)
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\ops\math_ops.py:388 abs
with ops.name_scope(name, "Abs", [x]) as name:
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\ops.py:6492 __enter__
return self._name_scope.__enter__()
c:\users\astro\appdata\local\programs\python\python38\lib\contextlib.py:113 __enter__
return next(self.gen)
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\ops.py:4176 name_scope
if name:
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\ops.py:877 __bool__
self._disallow_bool_casting()
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\ops.py:486 _disallow_bool_casting
self._disallow_when_autograph_enabled(
C:\Users\astro\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\ops.py:472 _disallow_when_autograph_enabled
raise errors.OperatorNotAllowedInGraphError(
OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
The error tries to point me to my code:
import tensorflow as tf
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[ ]:
# In[118]:
print(tf.__version__)
# In[119]:
import time
from IPython.display import clear_output
AUTOTUNE = tf.data.experimental.AUTOTUNE
# In[120]:
train_horses = []
train_horses_path = 'C:/Users/astro/pythonprojects/cycleGANs/horse2zebra/trainA/'
for image_name in os.listdir(train_horses_path):
    img_path = os.path.join(train_horses_path, image_name)
    img_arr = plt.imread(img_path)
    train_horses.append(img_arr)
train_horses = np.array(train_horses)
# In[121]:
print(train_horses.shape)
# (1067, 256, 256, 3)
# In[122]:
train_zebras_path = 'C:/Users/astro/pythonprojects/cycleGANs/horse2zebra/trainB/'
train_zebras = []
for image_name in os.listdir(train_zebras_path):
    img_path = os.path.join(train_zebras_path, image_name)
    img_arr = plt.imread(img_path)
    train_zebras.append(img_arr)
train_zebras = np.array(train_zebras)
# In[123]:
test_horses_path = 'C:/Users/astro/pythonprojects/cycleGANs/horse2zebra/testA/'
test_horses = []
for image_name in os.listdir(test_horses_path):
    img_path = os.path.join(test_horses_path, image_name)
    img_arr = plt.imread(img_path)
    test_horses.append(img_arr)
test_horses = np.array(test_horses)
# In[124]:
test_zebras_path = 'C:/Users/astro/pythonprojects/cycleGANs/horse2zebra/testB/'
test_zebras = []
for image_name in os.listdir(test_zebras_path):
    img_path = os.path.join(test_zebras_path, image_name)
    img_arr = plt.imread(img_path)
    test_zebras.append(img_arr)
test_zebras = np.array(test_zebras)
# In[125]:
# In[126]:
# Setting image constants
BUFFER_SIZE = 1000
BATCH_SIZE = 1
IMG_WIDTH = 256
IMG_HEIGHT = 256
# In[127]:
# Input pipeline:
train_horses = tf.data.Dataset.from_tensor_slices(train_horses)
train_zebras = tf.data.Dataset.from_tensor_slices(train_zebras)
test_horses = tf.data.Dataset.from_tensor_slices(test_horses)
test_zebras = tf.data.Dataset.from_tensor_slices(test_zebras)
# In[128]:
# Function: random crops
def random_crop(image):
    cropped_image = tf.image.random_crop(image, size=[IMG_HEIGHT, IMG_WIDTH, 3])
    return cropped_image
# In[129]:
# Function: normalize (normalizes images to [-1, 1])
def normalize(image):
    image = tf.cast(image, tf.float32)
    # Note: the image pixel values lie from 0 to 255
    image = (image / 127.5) - 1
    return image
# In[130]:
# Function: random jitter
def random_jitter(image):
    # Resizing the image to 286x286x3
    image = tf.image.resize(image, size=[286, 286], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # Randomly cropping to 256x256x3
    image = random_crop(image)
    # Random mirroring
    image = tf.image.random_flip_left_right(image)
    return image
# In[131]:
def preprocess_image_train(image):
    image = random_jitter(image)
    image = normalize(image)
    return image
# In[132]:
def preprocess_image_test(image):
    image = normalize(image)
    return image
# In[133]:
train_horses = train_horses.map(
    preprocess_image_train, num_parallel_calls=AUTOTUNE).cache().shuffle(buffer_size=BUFFER_SIZE).batch(1)
train_zebras = train_zebras.map(
    preprocess_image_train, num_parallel_calls=AUTOTUNE).cache().shuffle(buffer_size=BUFFER_SIZE).batch(1)
test_horses = test_horses.map(
    preprocess_image_test, num_parallel_calls=AUTOTUNE).cache().shuffle(buffer_size=BUFFER_SIZE).batch(1)
test_zebras = test_zebras.map(
    preprocess_image_test, num_parallel_calls=AUTOTUNE).cache().shuffle(buffer_size=BUFFER_SIZE).batch(1)
# In[134]:
sample_horse = next(iter(train_horses))
sample_zebra = next(iter(train_zebras))
# In[135]:
plt.subplot(121)
plt.title('Horse')
plt.imshow(sample_horse[0] * 0.5 + 0.5)
plt.subplot(122)
plt.title('Horse with random jitter')
plt.imshow(random_jitter(sample_horse[0]) * 0.5 + 0.5)
# In[136]:
pix2pix_model_dir = 'C:/Users/astro/pythonprojects/cycleGANs/venv/Lib/site-packages/tensorflow_examples/models/pix2pix'
# In[137]:
# Generator G to transform image from X(horse) to Y(zebra). (G: Horse -> Zebra)
# Generator F to transform image from Y(zebra) to X(horse). (F: Zebra -> Horse)
# In[138]:
# Discriminator (Dx) Learns to differentiate between X and F(Y)(generated horse img)
# Discriminator (Dy) Learns to differentiate between Y and G(X)(generated zebra img)
# In[ ]:
# In[139]:
# Building Generator G
from tensorflow.keras import layers
def Generator():
    # Creating input layer
    input_layer = layers.Input(shape=[256, 256, 3])  # (bs, 256, 256, 3)
    # Downsampling
    conv1 = layers.Conv2D(filters=32, kernel_size=4, strides=2, padding='same', activation='relu')(
        input_layer)  # (bs, 128, 128, 32)
    bat_norm = layers.BatchNormalization()(conv1)
    conv2 = layers.Conv2D(filters=64, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 64, 64, 64)
    bat_norm = layers.BatchNormalization()(conv2)
    conv3 = layers.Conv2D(filters=128, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 32, 32, 128)
    bat_norm = layers.BatchNormalization()(conv3)
    conv4 = layers.Conv2D(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 16, 16, 256)
    bat_norm = layers.BatchNormalization()(conv4)
    conv5 = layers.Conv2D(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 8, 8, 256)
    bat_norm = layers.BatchNormalization()(conv5)
    conv6 = layers.Conv2D(filters=256, kernel_size=4, strides=2, padding='same', activation='relu')(
        bat_norm)  # (bs, 4, 4, 256)
    bat_norm = layers.BatchNormalization()(conv6)
    conv7 = layers.Conv2D(filters=256, kernel_size=4, strides=2, padding='same', activation='relu')(
        bat_norm)  # (bs, 2, 2, 256)
    bat_norm = layers.BatchNormalization()(conv7)
    # Upsampling
    convt1 = layers.Conv2DTranspose(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 4, 4, 256)
    convt1 = layers.concatenate([convt1, conv6])  # (bs, 4, 4, 512)
    drop = layers.Dropout(0.1)(convt1)
    bat_norm = layers.BatchNormalization()(drop)
    convt2 = layers.Conv2DTranspose(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 8, 8, 256)
    convt2 = layers.concatenate([convt2, conv5])  # (bs, 8, 8, 512)
    drop = layers.Dropout(0.1)(convt2)
    bat_norm = layers.BatchNormalization()(drop)
    convt3 = layers.Conv2DTranspose(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 16, 16, 256)
    convt3 = layers.concatenate([convt3, conv4])  # (bs, 16, 16, 512)
    drop = layers.Dropout(0.1)(convt3)
    bat_norm = layers.BatchNormalization()(drop)
    convt4 = layers.Conv2DTranspose(filters=128, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 32, 32, 128)
    convt4 = layers.concatenate([convt4, conv3])  # (bs, 32, 32, 256)
    bat_norm = layers.BatchNormalization()(convt4)
    convt5 = layers.Conv2DTranspose(filters=64, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 64, 64, 64)
    convt5 = layers.concatenate([convt5, conv2])  # (bs, 64, 64, 128)
    bat_norm = layers.BatchNormalization()(convt5)
    convt6 = layers.Conv2DTranspose(filters=32, kernel_size=4, strides=2, activation='relu', padding='same')(
        bat_norm)  # (bs, 128, 128, 32)
    convt6 = layers.concatenate([convt6, conv1])  # (bs, 128, 128, 64)
    bat_norm = layers.BatchNormalization()(convt6)
    # convt7 = layers.Conv2DTranspose(filters=64, kernel_size=4, strides=2, activation='relu',padding='same')(bat_norm) #(bs, 128, 128, 128)
    # bat_norm = layers.BatchNormalization()(convt7)
    convt_op = layers.Conv2DTranspose(filters=3, kernel_size=4, strides=2, activation='tanh', padding='same')(bat_norm)  # (bs, 256, 256, 3)
    return tf.keras.models.Model(inputs=input_layer, outputs=convt_op)
# In[140]:
generator_g = Generator()
# Displaying Generator
tf.keras.utils.plot_model(generator_g, show_shapes=True, dpi=64)
# In[141]:
# Creating generator_f
generator_f = Generator()
# In[142]:
# Defining discriminator
def Discriminator(target=False):
    inp = layers.Input(shape=[256, 256, 3], name='input_image')
    tar = layers.Input(shape=[256, 256, 3], name='target_image')
    x = inp
    if target:
        x = layers.concatenate([inp, tar])  # (bs, 256, 256, channels*2)
    # downsampling
    conv1 = layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='same', activation='relu')(
        x)  # (bs, 128, 128, 64)
    leaky1 = layers.LeakyReLU()(conv1)
    conv2 = layers.Conv2D(filters=128, kernel_size=4, strides=2, activation='relu', padding='same')(
        leaky1)  # (bs, 64, 64, 128)
    bat_norm = layers.BatchNormalization()(conv2)
    leaky2 = layers.LeakyReLU()(bat_norm)
    conv3 = layers.Conv2D(filters=256, kernel_size=4, strides=2, activation='relu', padding='same')(
        leaky2)  # (bs, 32, 32, 256)
    bat_norm = layers.BatchNormalization()(conv3)
    leaky3 = layers.LeakyReLU()(bat_norm)
    zero_pad1 = layers.ZeroPadding2D()(leaky3)  # (bs, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(filters=512, kernel_size=4, strides=1, use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)
    batch_norm = layers.BatchNormalization()(conv)
    leaky_relu = layers.LeakyReLU()(batch_norm)
    zero_pad2 = layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)
    last = tf.keras.layers.Conv2D(1, 4, strides=1)(zero_pad2)  # (bs, 30, 30, 1)
    if target:
        return tf.keras.Model(inputs=[inp, tar], outputs=last)
    else:
        return tf.keras.Model(inputs=inp, outputs=last)
# In[143]:
# Setting up discriminator x
discriminator_x = Discriminator()
tf.keras.utils.plot_model(discriminator_x, show_shapes=True, dpi=64)
# In[144]:
# Setting up discriminator_y
discriminator_y = Discriminator()
# In[145]:
with tf.device('/cpu:0'):
    to_zebra = generator_g(sample_horse)
    to_horse = generator_f(sample_zebra)
plt.figure(figsize=(8, 8))
contrast = 8
imgs = [sample_horse, to_zebra, sample_zebra, to_horse]
title = ['Horse', 'To Zebra', 'Zebra', 'To Horse']
for i in range(len(imgs)):
    plt.subplot(2, 2, i + 1)
    plt.title(title[i])
    if i % 2 == 0:
        plt.imshow(imgs[i][0] * 0.5 + 0.5)
    else:
        plt.imshow(imgs[i][0] * 0.5 * contrast + 0.5)
plt.show()
# In[146]:
plt.figure(figsize=(8, 8))
plt.subplot(121)
plt.title('Is a real zebra?')
with tf.device('/cpu:0'):
    plt.imshow(discriminator_y(sample_zebra)[0, ..., -1], cmap='RdBu_r')
plt.subplot(122)
plt.title('Is a real horse?')
with tf.device('/cpu:0'):
    plt.imshow(discriminator_x(sample_horse)[0, ..., -1], cmap='RdBu_r')
plt.show()
# In[147]:
LAMBDA = 10
# In[148]:
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True,
                                              label_smoothing=True)
# In[149]:
def discriminator_loss(real, generated):
    real_loss = loss_obj(tf.ones_like(real), real)
    generated_loss = loss_obj(tf.zeros_like(generated), generated).numpy()
    total_disc_loss = real_loss + generated_loss
    return total_disc_loss * 0.5
# In[150]:
def generator_loss(generated):
    return loss_obj(tf.ones_like(generated), generated)
# In[ ]:
# In[151]:
def calc_cycle_loss(real_image, cycled_image):
    loss1 = tf.reduce_mean(tf.abs(real_image, cycled_image))
    return LAMBDA * loss1
# In[152]:
# In[154]:
''' generator_g is responsible for translating image X to image Y. Identity
loss says that, if you fed image Y to generator G, it should yield the real
image Y or something close to image Y.
identity_loss = |G(Y)-Y|+|F(X)-X|'''
def identity_loss(real_image, same_image):
    loss = tf.reduce_mean(tf.abs(real_image - same_image))
    print(loss)
    return LAMBDA * 0.5 * loss
# In[155]:
# Optimizers
generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
# In[156]:
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(generator_g=generator_g,
                           generator_f=generator_f,
                           discriminator_x=discriminator_x,
                           discriminator_y=discriminator_y,
                           generator_g_optimizer=generator_g_optimizer,
                           generator_f_optimizer=generator_f_optimizer,
                           discriminator_x_optimizer=discriminator_x_optimizer,
                           discriminator_y_optimizer=discriminator_y_optimizer)
# In[157]:
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=15)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print('Latest checkpoint restored!!')
# In[158]:
def generate_images(model, test_input):
    prediction = model(test_input)
    plt.figure(figsize=(12, 12))
    display_list = [test_input[0], prediction[0]]
    title = ['Input Image', 'Predicted Image']
    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.title(title[i])
        # getting the pixel values between [0, 1] to plot it.
        plt.imshow(display_list[i] * 0.5 + 0.5)
        plt.axis('off')
    plt.show()
# Even though the training loop looks complicated, it consists of four basic steps:
#
# Get the predictions.
# Calculate the loss.
# Calculate the gradients using backpropagation.
# Apply the gradients to the optimizer.
# In[159]:
EPOCHS = 40
# In[160]:
@tf.function
def train_step(real_x, real_y):
    # persistent is set to True because the tape is used more than once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # Generator G translates X -> Y.
        # Generator F translates Y -> X.
        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)
        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)
        # same_x and same_y are used for identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)
        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)
        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)
        # calculate the loss
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)
        total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)
        # Total generator loss = adversarial loss + cycle loss
        total_gen_g_loss = gen_g_loss + total_cycle_loss + identity_loss(real_y, same_y)
        total_gen_f_loss = gen_f_loss + total_cycle_loss + identity_loss(real_x, same_x)
        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)
    # Calculate the gradients for generator and discriminator
    generator_g_gradients = tape.gradient(total_gen_g_loss, generator_g.trainable_variables)
    generator_f_gradients = tape.gradient(total_gen_f_loss, generator_f.trainable_variables)
    discriminator_x_gradients = tape.gradient(disc_x_loss, discriminator_x.trainable_variables)
    discriminator_y_gradients = tape.gradient(disc_y_loss, discriminator_y.trainable_variables)
    # Apply the gradients to the optimizer
    generator_g_optimizer.apply_gradients(zip(generator_g_gradients, generator_g.trainable_variables))
    generator_f_optimizer.apply_gradients(zip(generator_f_gradients, generator_f.trainable_variables))
    discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients, discriminator_x.trainable_variables))
    discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients, discriminator_y.trainable_variables))
# In[161]:
for epoch in range(EPOCHS):
    start = time.time()
    n = 0
    for image_x, image_y in tf.data.Dataset.zip((train_horses, train_zebras)):
        train_step(image_x, image_y)
        if n % 10 == 0:
            print('.', end='')
        n += 1
    clear_output(wait=True)
    # Using a consistent image (sample_horse) so that the progress of the model
    # is clearly visible.
    generate_images(generator_g, sample_horse)
    if (epoch + 1) % 5 == 0:
        ckpt_save_path = ckpt_manager.save()
        print(f'Saving checkpoint for epoch {epoch + 1} at {ckpt_save_path}')
    print(f'Time taken for epoch {epoch + 1} is {time.time() - start}')
Answer 0 (score: 1)
You are right. Take a look:
The operation `loss1 = tf.reduce_mean(tf.abs(real_image, cycled_image))` should only take one value (the second value is used as the name of the operation). I think what is happening is that, since `cycled_image` is being passed as the name of the operation, an `if name:` statement is being run to check whether the name is valid. Since it is a tensor, it throws the error. All you have to do is change it to `loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))` or something similar.
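A minimal sketch of the corrected function, reusing the `LAMBDA` constant from the question's code (the only change is subtracting before calling `tf.abs`):

def calc_cycle_loss(real_image, cycled_image):
    # Compute the difference first so tf.abs receives a single tensor;
    # tf.abs(x, name=None) treats a second positional argument as the op name.
    loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return LAMBDA * loss1

An equivalent form is `tf.reduce_mean(tf.abs(tf.subtract(real_image, cycled_image)))`, which makes the element-wise subtraction explicit; either way `tf.abs` receives exactly one tensor, so graph mode never tries to evaluate a tensor in an `if` check.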