Dimension incompatibility between labels and prediction data (Keras RALSGAN)

Asked: 2019-08-13 05:57:12

Tags: tensorflow keras gan

I am trying to train a RALSGAN in Keras, with the loss function taken from https://www.kaggle.com/c/generative-dog-images/discussion/99485#latest-597003:

# Take AVG over x_r and x_f in batch
disc_loss = (C(x_r) - AVG(C(x_f)) - 1)^2 + (C(x_f) - AVG(C(x_r)) + 1)^2
gen_loss = (C(x_r) - AVG(C(x_f)) + 1)^2 + (C(x_f) - AVG(C(x_r)) - 1)^2

To obtain C(x_r) and C(x_f), I feed both kinds of data through the network, concatenating them along the batch axis beforehand; however, I run into a dimension-incompatibility error:

# ========= loss functions =========
import tensorflow as tf

batch = 75

def TF_errD(label_one, predict):
    # predict stacks the fake scores on top of the real scores (75 + 75)
    fake, real = tf.split(predict, [batch, batch], 0)
    label_one, _ = tf.split(label_one, [batch, batch], 0)
    return (tf.reduce_mean((real - tf.reduce_mean(fake, 0) - label_one)**2, 0)
            + tf.reduce_mean((fake - tf.reduce_mean(real, 0) + label_one)**2, 0)) / 2.

# label_one here is a dummy; I use ones_like in the return instead,
# but I get the same error either way
def TF_errG(label_one, predict):
    fake, real = tf.split(predict, [batch, batch], 0)
    return (tf.reduce_mean((real - tf.reduce_mean(fake, 0) + tf.ones_like(real))**2, 0)
            + tf.reduce_mean((fake - tf.reduce_mean(real, 0) - tf.ones_like(real))**2, 0)) / 2.

# ========= end of loss functions =========
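A minimal shape check for these losses (a sketch, assuming TF 2.x eager execution; under graph-mode TF 1.x you would evaluate the tensors in a session instead): both functions expect `predict` to carry 2*batch = 150 samples, fake stacked on top of real, and reduce to a (1, 1, 1) tensor.

# shape sanity check: predict must hold 75 fake + 75 real scores
import numpy as np

dummy_predict = tf.constant(np.random.randn(2*batch, 1, 1, 1), tf.float32)
dummy_labels = tf.ones([2*batch, 1, 1, 1], tf.float32)

print(TF_errD(dummy_labels, dummy_predict).shape)  # (1, 1, 1), reduced over the batch axis
print(TF_errG(dummy_labels, dummy_predict).shape)  # (1, 1, 1)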

#======== here is generator phase==============

import numpy as np
from keras.layers import (Input, Dense, Reshape, BatchNormalization,
                          Flatten, Conv2D, UpSampling2D, Conv2DTranspose)
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
input_dims = (1000,)
basic_units = 10

def conv_l(x, channel=10, kernel=4, stride=(2, 2), padding='same',
           batch_n_momentum=0.5, relu_alpha=0.2):
    # transposed-convolution block: upsample by 2, then BatchNorm and LeakyReLU
    x = Conv2DTranspose(filters=channel, kernel_size=kernel, strides=stride, padding=padding)(x)
    x = BatchNormalization(momentum=batch_n_momentum)(x)
    x = LeakyReLU(alpha=relu_alpha)(x)
    return x

Input_l = Input(shape=input_dims)

x = Dense(512*8*8)(Input_l)  # input shape is taken from the Input layer above
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.5)(x)
x = Reshape((8, 8, 512))(x)
x = conv_l(x, 512)   # 8x8   -> 16x16
x = conv_l(x, 256)   # 16x16 -> 32x32
x = conv_l(x, 128)   # 32x32 -> 64x64
Output_l = Conv2D(3, 3, padding='same', activation='tanh')(x)

Generator = Model(Input_l,Output_l)

Generator.summary()
#======== end of generator ==============
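As a quick sanity check on the generator (a sketch, not from the original post): the three stride-2 transposed convolutions upsample 8x8 through 16x16 and 32x32 to 64x64, so one 1000-dim noise vector should yield one 64x64 RGB image.

noise_check = np.random.normal(0, 1, (1, 1000))
print(Generator.predict(noise_check).shape)  # expected: (1, 64, 64, 3)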

#======== here is discriminator phase==============
from keras.layers import Dropout

Discriminator = Sequential()

Discriminator.add(Conv2D(32, 4, padding='same', strides=2, input_shape=(64, 64, 3)))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(64,4,strides= 2, padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(128,4,strides= 2,padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(256,4,strides= 2,padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Dropout(0.3))
Discriminator.add(Conv2D(1,4,strides= 1,padding='valid'))


# optimizer_dis / optimizer_gen are defined elsewhere in the kernel, e.g. Adam instances
Discriminator.compile(loss=TF_errD, optimizer=optimizer_dis, metrics=['accuracy'])
Discriminator.summary()
#=========== end of discriminator ==============
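For reference, the critic reduces 64x64 inputs through four stride-2 convolutions to 4x4, and the final 4x4 'valid' convolution collapses that to a single score per image, which is why the labels later on have shape [*, 1, 1, 1]:

print(Discriminator.output_shape)  # expected: (None, 1, 1, 1)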

#=========GAN network=========
from keras.layers.merge import concatenate

Discriminator.trainable = False

image_shape = imagesIn[0].shape  # imagesIn is defined elsewhere in the kernel (the 64x64x3 training images)

input_noise = Input(input_dims)
input_real = Input(image_shape)

output_g = Generator(input_noise)
dis_inputimage = concatenate([output_g, input_real], axis=0)  # fake on top of real, along the batch axis
output = Discriminator(dis_inputimage)
GAN = Model([input_noise,input_real],output)

GAN.compile(loss=TF_errG, optimizer=optimizer_gen, metrics=['accuracy'])
GAN.summary()

#===============end of GAN ============
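Note the sample-count bookkeeping here: each GAN input carries 75 samples, but the axis-0 concatenate makes the model output 150 samples. Keras validates that targets match the input sample count (75), while the built-in accuracy metric compares y_true element-wise against the full 150-sample output, so no single label shape can satisfy both checks. A sketch of one possible workaround, assuming the concat-along-batch design is kept: compile without the metric and pass 75-sample dummy labels, which TF_errG ignores anyway via ones_like.

# hypothetical workaround sketch, not part of the original post:
GAN.compile(loss=TF_errG, optimizer=optimizer_gen)     # no accuracy metric
label_dummy_g = np.ones((batch, 1, 1, 1), np.float32)  # 75 labels, matches the input count
# g_loss = GAN.train_on_batch([noise, sub_images], label_dummy_g)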


#========training phase============
epochs = 600

input_dim = 1000

label_one_d = tf.ones([150, 1, 1, 1], tf.int32)
label_one_g = tf.ones([75, 1, 1, 1], tf.int32)

for times in range(epochs):

    for batch_times in range(int(idxIn/batch)):  # idxIn (image count) is defined elsewhere in the kernel
        # =======================
        # ==train discriminater==
        # =======================
        sub_images = imagesIn2[batch_times*batch:(batch_times+1)*batch]

        noise = np.random.normal(0,1,(batch,input_dim))
        synthesis_img = Generator.predict(noise)
        data = np.concatenate((synthesis_img, sub_images))  # fake first, then real, matching tf.split in the loss
        d_loss = Discriminator.train_on_batch(data,label_one_d)
        # =======================
        # ====train generator====
        # =======================
        noise = np.random.normal(0, 1, (batch, input_dim))

        g_loss = GAN.train_on_batch([noise, sub_images], label_one_g)

# ======================end of training===============
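For clarity, the shapes in play per training step (assuming batch = 75):

# synthesis_img         : (75, 64, 64, 3)
# data = fake then real : (150, 64, 64, 3) -> Discriminator output (150, 1, 1, 1),
#                         so label_one_d must be (150, 1, 1, 1)
# GAN inputs            : noise (75, 1000), sub_images (75, 64, 64, 3)
# GAN output            : (150, 1, 1, 1) after the axis-0 concatenate,
#                         which is where the 75-vs-150 label conflict arises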

If I set label_one_g = tf.ones([75,1,1,1], tf.int32), I get an error like this:

InvalidArgumentError: 2 root error(s) found.
  (0) Invalid argument: Incompatible shapes: [150,1,1,1] vs. [75,1,1,1]
     [[{{node metrics_11/acc/Equal}}]]
     [[loss_11/mul/_1877]]
  (1) Invalid argument: Incompatible shapes: [150,1,1,1] vs. [75,1,1,1]
     [[{{node metrics_11/acc/Equal}}]]
0 successful operations.
0 derived errors ignored.  

And if I set label_one_g = tf.ones([150,1,1,1], tf.int32), then:

ValueError: Input arrays should have the same number of samples as target arrays. Found 75 input samples and 150 target samples.

I need help resolving this issue. Or is there another training approach I could use instead?

0 Answers:

There are no answers yet.