The following code is for a deep convolutional GAN (DCGAN):
class Dcgan:
    def __init__(self,latent_space,gen_rate,disc_rate,images_source,resized_path,result_path,checkpoint_path,batch_size,optimizer,number_of_images_to_display,epoches):
        self.latent_space = latent_space
        self.gen_rate = gen_rate
        self.disc_rate = disc_rate
        self.images_source = images_source
        self.training_path = os.listdir(self.images_source)
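        # os.listdir() returns a list of file names (strings), not image arrays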
        self.resized_path = resized_path
        self.result_path = result_path
        self.checkpoint_path = checkpoint_path
        self.batch_size = batch_size
        self.optimizer = optimizer
        self.number_of_images_to_display = number_of_images_to_display
        self.epoches = epoches
        self.noise = tf.random.normal([self.batch_size,self.latent_space])
        self.noise_gen_n_save = tf.random.normal([self.number_of_images_to_display,self.latent_space])
    def resize_images(self):
        if not os.path.exists(self.resized_path):
            os.mkdir('images')
        for i,image_path in enumerate(os.listdir(self.images_source)):
            img = cv2.imread(os.path.join(self.images_source,image_path), cv2.IMREAD_UNCHANGED)
            resized = cv2.resize(img, (28,28))
            path_new = path_save+'/'+image_path
            status = cv2.imwrite(path_new,resized)
        images=[]
        image_paths = os.listdir('images')
        for image_path in image_paths:
            images.append(imread(os.path.join(path_save,image_path)))
        images = np.array(images).astype('float32')
        self.training_images = (images-127.5)/127.5
    def Generator(self):
        generator = tf.keras.Sequential()
        generator.add(Dense(units=8*8*256,input_shape=(self.latent_space,),use_bias=False))
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())
        generator.add(Reshape((8,8,256)))
        assert generator.output_shape == (None,8,8,256)
        generator.add(Conv2DTranspose(filters = 64 ,kernel_size = (5,5),strides = (2,2),padding="same",use_bias=False)) # 8 to 16
        assert generator.output_shape == (None,16,16,64)
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())
        generator.add(Conv2DTranspose(filters = 16,kernel_size = (5,5),strides = (2,2),padding="same",use_bias=False)) # 16 to 32
        assert generator.output_shape == (None,32,32,16)
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())
        generator.add(Conv2DTranspose(filters = 3,kernel_size = (5,5),strides = (2,2),padding="same",activation='tanh',use_bias=False)) # 32 to 64
        assert generator.output_shape == (None,64,64,3)
        # generator.add(BatchNormalization())
        # generator.add(LeakyReLU())
        # generator.add(Conv2DTranspose(filters = 9,kernel_size = (5,5),strides = (2,2),padding="same")) # 64 to 128
        # assert generator.output_shape == (None,128,128,9)
        # generator.add(BatchNormalization())
        # generator.add(LeakyReLU())
        # generator.add(Conv2DTranspose(filters = 3,kernel_size = (5,5),strides = (2,2),padding="same",activation='tanh')) # 128 to 256
        # assert generator.output_shape == (None,256,256,3)
        return generator
    def Discriminator(self):
        discriminator = tf.keras.Sequential()
        discriminator.add(Conv2D(filters = 64,kernel_size = (5,5) ,strides = (2,2),padding = "same",activation='relu',input_shape=(64,64, 3)))
        discriminator.add(Dropout(0.3))
        discriminator.add(Conv2D(filters = 128,kernel_size = (5,5) ,strides = (2,2),padding = "same",activation='relu'))
        discriminator.add(Dropout(0.3))
        discriminator.add(Conv2D(filters = 256,kernel_size = (5,5) ,strides = (2,2),padding = "same",activation='relu'))
        discriminator.add(Dropout(0.3))
        discriminator.add(Conv2D(filters = 512,kernel_size = (5,5) ,strides = (2,2),padding = "same",activation='relu'))
        discriminator.add(Dropout(0.3))
        discriminator.add(Flatten())
        discriminator.add(Dense(1))
        return discriminator
    def generator_loss(self):
        self.gen_loss = BinaryCrossentropy(tf.ones_like(self.fake_output),self.fake_output)
        return self.gen_loss
    def discriminator_loss(self):
        self.disc_loss_real = BinaryCrossentropy(tf.ones_like(self.real_output),self.real_output)
        self.disc_loss_fake = BinaryCrossentropy(tf.zeros_like(self.fake_output),self.fake_output)
        self.total_disc_loss = self.disc_loss_real+self.disc_loss_fake
        return self.total_disc_loss
    @tf.function
    def train_step(self):
        generator,discriminator = self.Generator(),self.Discriminator()
        with tf.GradientTape() as grad_gen, tf.GradientTape() as grad_disc:
            self.generated_images = generator(self.noise,training=True)
            self.real_output = discriminator(self.training_path,training=True)
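            # self.training_path holds the file names from os.listdir(), i.e. Python strings, not image tensors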
            self.fake_output = discriminator(self.generated_images,training=True)
            self.discriminator_loss()
            self.generator_loss()
        self.gen_gradients = grad_gen.gradient(self.gen_loss,generator.trainable_variables)
        self.disc_gradients = grad_disc.gradient(self.total_disc_loss,discriminator.trainable_variables)
        Adam(learning_rate=self.gen_rate).apply_gradients(zip(self.gen_gradients,generator.trainable_variables))
        Adam(learning_rate=self.disc_rate).apply_gradients(zip(self.disc_gradients,discriminator.trainable_variables))
        return self.gen_loss,self.total_disc_loss
    def display_and_save(self,epoch):
        output_images = self.generator(self.noise_gen_n_save,training = False)
        plt.figure(figsize=(13,13))
        for i in range(self.number_of_images_to_display):
            plt.subplot(8,8,i+1)
            plt.imshow(output_images[i,:,:,0])
            plt.axis('off')
        plt.savefig(fname='Traning images\Generated images after epoch : {}'.format(epoch),)
    def training(self):
        gen_losses,total_disc_losses = [],[]
        for epoch in range(self.epoches):
            start_time = time.time()
            self.train_step()
            gen_loss.append(self.gen_loss.numpy(),self.total_disc_loss.numpy())
            display.clear_output(wait=True)
            display_and_save(epoch)
        return gen_losses,total_disc_losses
When I run it with:
dcgan = Dcgan(
    latent_space = 100,
    gen_rate = 0.0002,
    disc_rate = 0.0003,
    images_source = 'D:/PlantVillage-Dataset/raw/color/Orange___Haunglongbing_(Citrus_greening)',
    resized_path = 'resized_images',
    result_path = 'generated_images',
    checkpoint_path = 'training_checkpoints',
    batch_size = 256,
    optimizer = Adam,
    number_of_images_to_display = 16,
    epoches = 100,
)
dcgan.training()
I get the following warning, followed by an AttributeError:
WARNING:tensorflow:Entity <bound method Dcgan.Generator of <__main__.Dcgan object at 0x0000024B49994E88>> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: Failed to parse source code of <bound method Dcgan.Generator of <__main__.Dcgan object at 0x0000024B49994E88>>, which Python reported as:
def Generator(self): ## followed by the entire code of Generator
......
AttributeError Traceback (most recent call last)
<ipython-input-122-9387d007efdb> in <module>
----> 1 dcgan.training()
<ipython-input-118-f9eb662d913f> in training(self)
134 for epoch in range(self.epoches):
135 start_time = time.time()
--> 136 self.train_step()
AttributeError: 'str' object has no attribute '_keras_mask'
I'm still fairly new to Python. I'm sure the error is caused by some mistake I made rather than by a bug in Python itself (as the warning suggests); I just can't figure out what it is...
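For reference, the `'str' object has no attribute '_keras_mask'` message usually means a Keras model was called on Python strings instead of a tensor or array. In the posted code the likely trigger is `self.real_output = discriminator(self.training_path, training=True)`: `self.training_path` is the list of file names returned by `os.listdir`, not image data (the pixel data would be `self.training_images`, and only after `resize_images()` has been called). The AutoGraph warning about `Generator` is a separate, usually harmless notice. Below is a minimal sketch, not the original code, of how a TensorFlow 2 DCGAN training step is commonly wired; the names `make_train_step`, `real_batch`, `gen_opt`, and `disc_opt` are illustrative assumptions, and the models and optimizers are built once and reused instead of being re-created on every call.

import tensorflow as tf

# BinaryCrossentropy is a class: instantiate it once, then call the object
# on (labels, predictions).
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def make_train_step(generator, discriminator, gen_rate=2e-4, disc_rate=3e-4):
    # One optimizer per model, created up front so their state persists across steps.
    gen_opt = tf.keras.optimizers.Adam(learning_rate=gen_rate)
    disc_opt = tf.keras.optimizers.Adam(learning_rate=disc_rate)

    @tf.function
    def train_step(real_batch, noise):
        # real_batch: float32 images of shape (batch, 64, 64, 3), e.g. a slice of
        # self.training_images -- NOT the file-name strings from os.listdir().
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            fake_images = generator(noise, training=True)
            real_output = discriminator(real_batch, training=True)
            fake_output = discriminator(fake_images, training=True)
            gen_loss = cross_entropy(tf.ones_like(fake_output), fake_output)
            disc_loss = (cross_entropy(tf.ones_like(real_output), real_output)
                         + cross_entropy(tf.zeros_like(fake_output), fake_output))
        gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
        disc_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
        gen_opt.apply_gradients(zip(gen_grads, generator.trainable_variables))
        disc_opt.apply_gradients(zip(disc_grads, discriminator.trainable_variables))
        return gen_loss, disc_loss

    return train_step

A note on the design choice: creating fresh models and fresh `Adam(...)` instances inside every `train_step` call, as the posted class does, discards the weights and optimizer state each step, which is why the sketch builds them outside the step. Two further observations on the posted code, in case they surface once the string input is fixed: `training()` never calls `resize_images()`, so `self.training_images` would not exist yet, and `resize_images()` resizes to 28x28 while the discriminator expects 64x64x3 inputs.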