我正在为我的GAN的生成器网络实现该文件中提到的pixel_norm。为此,我使用了以下代码:
# NOTE(review): this cell mixes three Keras implementations — the standalone
# `keras` package, `tensorflow.keras`, and (below) `tf.contrib.keras`.
# Layers from different implementations cannot be chained in one model;
# per the traceback, that mix is the likely cause of the
# "'Node' object has no attribute 'output_masks'" error.
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from tensorflow.keras import regularizers, initializers
from skimage.io import imread_collection
from tensorflow.keras.preprocessing import image
import numpy as np,os,cv2
from keras import backend as K
# Unused in this snippet; presumably a leftover hyperparameter.
eps=0.6
from tensorflow.keras.layers import Activation
import keras
from keras.layers import Lambda
#%%
def pixel_norm(x, epsilon=1e-8):
    """Pixel-wise feature normalization (as in Progressive GAN).

    Scales each feature vector to unit average magnitude:
    ``x / sqrt(mean(x**2) + epsilon)``, with the mean taken over axis 1.

    Args:
        x: input tensor.
            NOTE(review): axis=1 is the channel axis only for NCHW data;
            the generator below builds NHWC tensors (channels last), where
            the channel axis is -1 — confirm the intended axis.
        epsilon: small constant for numerical stability.

    Returns:
        Tensor with the same shape as ``x``.
    """
    # tf.math.rsqrt exists in both TF 1.x and TF 2.x; the bare `tf.rsqrt`
    # alias was removed in TF 2, so prefer the namespaced form.
    return x * tf.math.rsqrt(
        tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
#%%
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import defaultdict
from PIL import Image
from six.moves import range
from glob import glob
# Aliases into the Keras implementation bundled as tf.contrib.keras.
# NOTE(review): tf.contrib was removed in TensorFlow 2.x, so these aliases
# only work on TF 1.x.  Every layer used in the model below must come from
# this one implementation — do not mix in layers from the standalone
# `keras` package or from `tensorflow.keras`.
models = tf.contrib.keras.models
layers = tf.contrib.keras.layers
utils = tf.contrib.keras.utils
losses = tf.contrib.keras.losses
optimizers = tf.contrib.keras.optimizers
metrics = tf.contrib.keras.metrics
preprocessing_image = tf.contrib.keras.preprocessing.image
datasets = tf.contrib.keras.datasets
def generator(latent_size, classes=2):
    """Build the class-conditional generator model.

    Args:
        latent_size: dimensionality of the latent noise vector.
        classes: number of class labels for the conditional embedding.

    Returns:
        A Keras ``Model`` mapping ``[latent_noise, image_class]`` to a
        single-channel generated image with tanh activation.
    """
    def up_sampling_block(x, filter_size):
        # Upsample spatially by 2, convolve, then pixel-normalize.
        x = layers.UpSampling2D(size=(2, 2))(x)
        x = layers.Conv2D(filter_size, (5, 5), padding='same',
                          activation='relu')(x)
        # FIX: use the Lambda layer from the SAME Keras implementation as
        # every other layer here (`layers` == tf.contrib.keras.layers).
        # Applying the standalone `keras.layers.Lambda` to a tensor built
        # by tf.contrib.keras layers is what raised
        # "AttributeError: 'Node' object has no attribute 'output_masks'".
        x = layers.Lambda(pixel_norm, arguments={'epsilon': 1e-8})(x)
        return x

    # Input 1: integer image class label.
    image_class = layers.Input(shape=(1,), dtype='int32', name='image_class')
    # Embed the class label into the latent space.
    emb = layers.Embedding(classes, latent_size,
                           embeddings_initializer='glorot_normal')(image_class)
    cls = layers.Flatten()(emb)

    # Input 2: latent noise vector, modulated by the class embedding.
    latent_input = layers.Input(shape=(latent_size,), name='latent_noise')
    h = layers.multiply([latent_input, cls])

    # Conv generator trunk.
    x = layers.Dense(256)(h)
    # Same fix as Lambda: take Activation from `layers`, not from the
    # separately-imported tensorflow.keras.layers.Activation.
    x = layers.Activation('relu')(x)
    x = layers.Dense(128 * 13 * 13, activation='relu')(x)
    x = layers.Reshape((13, 13, 128))(x)
    # Upsample to (26, 26, 256) — the original comments said (14, 14),
    # but UpSampling2D doubles 13 to 26.
    x = up_sampling_block(x, 256)
    # Upsample to (52, 52, 128).
    x = up_sampling_block(x, 128)
    # Reduce channels to a single-channel image in [-1, 1]
    # ('valid' 3x3 conv trims the spatial dims to 50x50).
    generated_img = layers.Conv2D(1, (3, 3), padding='valid',
                                  activation='tanh')(x)

    return models.Model(inputs=[latent_input, image_class],
                        outputs=generated_img,
                        name='generator')
# Instantiate the generator: 50-dim latent vector, 2 image classes.
g=generator(latent_size=50,classes=2)
运行后,我得到以下错误:
Traceback (most recent call last):
File "<ipython-input-5-4cbc575f5dae>", line 66, in <module>
g=generator(latent_size=50,classes=2)
File "<ipython-input-5-4cbc575f5dae>", line 53, in generator
x = up_sampling_block(x, 256)
File "<ipython-input-5-4cbc575f5dae>", line 26, in up_sampling_block
x = Lambda(pixel_norm, arguments={'epsilon':1e-8})(x)
File "/home/nd/anaconda3/lib/python3.6/site-packages/keras/engine/base_layer.py", line 443, in __call__
previous_mask = _collect_previous_mask(inputs)
File "/home/nd/anaconda3/lib/python3.6/site-packages/keras/engine/base_layer.py", line 1311, in _collect_previous_mask
mask = node.output_masks[tensor_index]
AttributeError: 'Node' object has no attribute 'output_masks'
如何解决此问题?我在网上搜索后发现,问题可能出在同时导入了 tensorflow 的层和独立 keras 包的层并混用它们,但我不知道该如何修改代码来解决。