Large oscillating loss with WGAN-GP

Asked: 2019-12-30 05:53:38

Tags: tensorflow machine-learning keras loss-function generative-adversarial-network

I am trying to train a WaveGAN as described here: https://github.com/chrisdonahue/wavegan

In the paper, WaveGAN is trained with WGAN-GP, so I have tried to implement that myself by adapting code from https://github.com/LynnHo/DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2. However, even after only 2000 steps (roughly 1 epoch), the loss values I am getting for both the critic and the generator are large (on the order of 1000) and oscillate between negative and positive. I am using the same piano recordings they used, only resampled to 16000 Hz and converted from stereo to mono.
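
For reference, the losses I am trying to compute are the standard WGAN-GP objectives from the gradient-penalty paper (restated here only for clarity; the code below is meant to implement exactly these terms, with λ = GRADIENT_PENALTY_WEIGHT = 10):

L_D = E_{x_fake ~ P_g}[D(x_fake)] − E_{x_real ~ P_r}[D(x_real)] + λ · E_{x_interp}[ (‖∇ D(x_interp)‖₂ − 1)² ]
L_G = −E_{x_fake ~ P_g}[D(x_fake)]

where x_interp is sampled uniformly on straight lines between real and generated examples.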

My loss plot is:

[image: loss curves for the critic and generator]

I am hoping someone can verify whether my implementation is correct and, if it is, suggest what experiments I could run to diagnose the problem.

Note: TIMESTEPS indicates the number of samples I want the generator to produce per pass. It is currently set to 1 to replicate WaveGAN, and I intend to experiment with larger values later; for now I do not believe it is related to the problem. (A quick shape check is sketched below.)
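
To make the shapes concrete, here is a minimal sanity-check snippet (my own, not part of the training script; it assumes the GANModels.py listed below is importable) showing what the generator and critic expect with the constants used in train.py:

import tensorflow as tf

import GANModels

MODEL_DIMS, NUM_SAMPLES, TIMESTEPS, NOISE_LEN, BATCH_SIZE = 64, 16384, 1, 100, 8

G = GANModels.Generator(MODEL_DIMS, TIMESTEPS, NUM_SAMPLES)
D = GANModels.Critic(MODEL_DIMS, TIMESTEPS, NUM_SAMPLES)

# Noise is drawn with shape (batch, TIMESTEPS, NOISE_LEN), exactly as in train_G/train_D below
z = tf.random.normal(shape=(BATCH_SIZE, TIMESTEPS, NOISE_LEN))   # (8, 1, 100)
x_fake = G(z, training=False)
print(x_fake.shape)                                              # should be (8, 16384, 1)
print(D(x_fake, training=False).shape)                           # critic logits, should be (8, 1)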

My train.py script is:

import tensorflow as tf
from tensorflow.keras.optimizers import Adam
import numpy as np
import librosa
import random
import os
import sys
import time

import GANModels

gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)

MODEL_DIMS = 64
NUM_SAMPLES = 16384
TIMESTEPS = 1
D_UPDATES_PER_G_UPDATE = 5
GRADIENT_PENALTY_WEIGHT = 10.0
NOISE_LEN = 100
EPOCHS = 2000
EPOCHS_PER_SAMPLE = 2
BATCH_SIZE = 8
Fs = 16000

class GAN:
    def __init__(self, model_dims=MODEL_DIMS, num_samples=NUM_SAMPLES, timesteps=TIMESTEPS, gradient_penalty_weight=GRADIENT_PENALTY_WEIGHT,
                 noise_len=NOISE_LEN, batch_size=BATCH_SIZE, sr=Fs):
        self.model_dims = model_dims
        self.num_samples = num_samples
        self.timesteps = timesteps
        self.noise_dims = (timesteps, noise_len)
        self.batch_size = batch_size

        self.G = GANModels.Generator(self.model_dims, self.timesteps, num_samples)
        self.D = GANModels.Critic(self.model_dims, self.timesteps, num_samples)

        self.G_optimizer = Adam(learning_rate=1e-4, beta_1=0.5, beta_2=0.9)
        self.D_optimizer = Adam(learning_rate=1e-4, beta_1=0.5, beta_2=0.9)

        print(self.G.summary())
        print(self.D.summary())

        self.gradient_penalty_weight = gradient_penalty_weight

        self.sr = sr

    def _d_loss_fn(self, r_logit, f_logit):
        r_loss = - tf.reduce_mean(r_logit)
        f_loss = tf.reduce_mean(f_logit)
        return r_loss, f_loss

    def _g_loss_fn(self, f_logit):
        f_loss = - tf.reduce_mean(f_logit)
        return f_loss

    def _gradient_penalty(self, real, fake):
        def _interpolate(a, b):
            shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
            alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)
            inter = a + alpha * (b - a)
            inter.set_shape(a.shape)
            return inter

        x = _interpolate(real, fake)
        with tf.GradientTape() as t:
            t.watch(x)
            pred = self.D(x, training=True)

        grad = t.gradient(pred, x)
        norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
        gp = tf.reduce_mean((norm - 1.)**2)

        return gp

    @tf.function
    def train_G(self):
        with tf.GradientTape() as t:
            z = tf.random.normal(shape=(self.batch_size,) + self.noise_dims)
            x_fake = self.G(z, training=True)
            x_fake_d_logit = self.D(x_fake, training=True)
            G_loss = self._g_loss_fn(x_fake_d_logit)

        G_grad = t.gradient(G_loss, self.G.trainable_variables)
        self.G_optimizer.apply_gradients(zip(G_grad, self.G.trainable_variables))

        return {'g_loss': G_loss}

    @tf.function
    def train_D(self, x_real):
        with tf.GradientTape() as t:
            z = tf.random.normal(shape=(x_real.shape[0],) + self.noise_dims) #Half fake and half real
            x_fake = self.G(z, training=True)

            x_real_d_logit = self.D(x_real, training=True)
            x_fake_d_logit = self.D(x_fake, training=True)

            x_real_d_loss, x_fake_d_loss = self._d_loss_fn(x_real_d_logit, x_fake_d_logit)
            gp = self._gradient_penalty(x_real, x_fake)

            D_loss = (x_real_d_loss + x_fake_d_loss) + gp * self.gradient_penalty_weight

        D_grad = t.gradient(D_loss, self.D.trainable_variables)
        self.D_optimizer.apply_gradients(zip(D_grad, self.D.trainable_variables))

        return {'d_loss': x_real_d_loss + x_fake_d_loss, 'gp': gp}

    def sample(self, epoch, num_samples=10):
        z = tf.random.normal(shape=(num_samples,) + self.noise_dims)
        result = self.G(z, training=False)
        for i in range(num_samples):
            audio = np.array(result[i, :, :])
            audio = audio.flatten()  # flatten() returns a new array; assign it back so a 1-D signal is written
            librosa.output.write_wav(f"output/piano/{epoch}-{i}.wav", audio, sr=self.sr)


#############################################################################

gan = GAN()

X_train = []
for file in os.listdir(r"D:\ML_Datasets\mancini_piano\piano\train"):
    with open(r"D:\ML_Datasets\mancini_piano\piano\train" + fr"\{file}", "rb") as f:
        samples, _ = librosa.load(f, Fs)
        if len(samples) < TIMESTEPS*NUM_SAMPLES:
            audio = np.array([np.array([sample]) for sample in samples])
            padding = np.zeros(shape=(TIMESTEPS*NUM_SAMPLES - len(samples), 1), dtype='float32')
            X_train.append(np.append(audio, padding, axis=0))
        else:
            for i in range(len(samples) // (TIMESTEPS*NUM_SAMPLES)):
                X_train.append(np.array([np.array([sample]) for sample in samples[:TIMESTEPS*NUM_SAMPLES]]))
                samples = np.delete(samples, np.s_[:TIMESTEPS*NUM_SAMPLES])

print(f"X_train shape = {(len(X_train),) + X_train[0].shape}")

librosa.output.write_wav("output/piano/test.wav", X_train[0], sr=Fs)

train_summary_writer = tf.summary.create_file_writer("logs/train")

with train_summary_writer.as_default():
    steps_per_epoch = len(X_train) // BATCH_SIZE

    for e in range(EPOCHS):
        for i in range(steps_per_epoch):
            D_loss_sum = 0

            for n in range(D_UPDATES_PER_G_UPDATE):
                D_loss_dict = gan.train_D(np.array(random.sample(X_train, BATCH_SIZE)))
                D_loss_sum += D_loss_dict['d_loss']

            D_loss = D_loss_sum / D_UPDATES_PER_G_UPDATE

            G_loss_dict = gan.train_G()
            G_loss = G_loss_dict['g_loss']

            tf.summary.scalar('d_loss', D_loss, step=(e*steps_per_epoch)+i)
            tf.summary.scalar('g_loss', G_loss, step=(e*steps_per_epoch)+i)

            print(f"step {(e*steps_per_epoch)+i}: d_loss = {D_loss} g_loss = {G_loss}")

        if e % EPOCHS_PER_SAMPLE == 0:
            gan.sample(e)
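
One experiment I am considering (sketched below, not yet part of the script above): since train_D already returns the gradient-penalty term separately, I could log it next to d_loss to see whether the oscillation comes from the Wasserstein term or from the penalty. This hypothetical variant of the inner loop reuses the same variables as above:

            # Hypothetical change to the inner training loop: also average and log the GP term
            D_loss_sum, gp_sum = 0.0, 0.0
            for n in range(D_UPDATES_PER_G_UPDATE):
                D_loss_dict = gan.train_D(np.array(random.sample(X_train, BATCH_SIZE)))
                D_loss_sum += D_loss_dict['d_loss']
                gp_sum += D_loss_dict['gp']

            step = (e * steps_per_epoch) + i
            tf.summary.scalar('d_loss', D_loss_sum / D_UPDATES_PER_G_UPDATE, step=step)
            tf.summary.scalar('gp', gp_sum / D_UPDATES_PER_G_UPDATE, step=step)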

My GANModels.py script is:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Input, Dense, Reshape, Activation, Lambda,
                                     Conv1D, Conv2DTranspose, LeakyReLU)
from tensorflow.keras.models import Model


def Generator(d, a, num_samples, c=16):

    # Prelim layers
    input_layer = Input(shape=(100,))

    dense_layer0 = Dense(256*d, input_shape=(100,))(input_layer)#
    reshape_layer0 = Reshape((c, c*d))(dense_layer0)#
    relu_layer0 = Activation('relu')(reshape_layer0)#

    # WaveCNN layers
    c //= 2
    expanded_layer0 = Lambda(lambda x: K.expand_dims(x, axis=1))(relu_layer0)#relu_layer1
    conv1d_t_layer0 = Conv2DTranspose(c*d, (1, 25), strides=(1, 4), padding='same')(expanded_layer0)
    slice_layer0 = Lambda(lambda x: x[:, 0])(conv1d_t_layer0)
    relu_layer2 = Activation('relu')(slice_layer0)

    c //= 2
    expanded_layer1 = Lambda(lambda x: K.expand_dims(x, axis=1))(relu_layer2)
    conv1d_t_layer1 = Conv2DTranspose(c*d, (1, 25), strides=(1, 4), padding='same')(expanded_layer1)
    slice_layer1 = Lambda(lambda x: x[:, 0])(conv1d_t_layer1)
    relu_layer3 = Activation('relu')(slice_layer1)

    c //= 2
    expanded_layer2 = Lambda(lambda x: K.expand_dims(x, axis=1))(relu_layer3)
    conv1d_t_layer2 = Conv2DTranspose(c*d, (1, 25), strides=(1, 4), padding='same')(expanded_layer2)
    slice_layer2 = Lambda(lambda x: x[:, 0])(conv1d_t_layer2)
    relu_layer4 = Activation('relu')(slice_layer2)

    c //= 2
    expanded_layer3 = Lambda(lambda x: K.expand_dims(x, axis=1))(relu_layer4)
    conv1d_t_layer3 = Conv2DTranspose(c*d, (1, 25), strides=(1, 4), padding='same')(expanded_layer3)
    slice_layer3 = Lambda(lambda x: x[:, 0])(conv1d_t_layer3)
    relu_layer5 = Activation('relu')(slice_layer3)

    expanded_layer4 = Lambda(lambda x: K.expand_dims(x, axis=1))(relu_layer5)
    conv1d_t_layer4 = Conv2DTranspose(1, (1, 25), strides=(1, 4), padding='same')(expanded_layer4)#strides=(1,1)
    slice_layer4 = Lambda(lambda x: x[:, 0])(conv1d_t_layer4)
    tanh_layer0 = Activation('tanh')(slice_layer4)

    model = Model(inputs=input_layer, outputs=tanh_layer0)

    return model

def _apply_phaseshuffle(x, rad=2, pad_type='reflect'):
    b, x_len, nch = x.get_shape().as_list()

    phase = tf.random.uniform([], minval=-rad, maxval=rad + 1, dtype=tf.int32)
    pad_l = tf.maximum(phase, 0)
    pad_r = tf.maximum(-phase, 0)
    phase_start = pad_r
    x = tf.pad(x, [[0, 0], [pad_l, pad_r], [0, 0]], mode=pad_type)

    x = x[:, phase_start:phase_start+x_len]
    x.set_shape([b, x_len, nch])

    return x

def Critic(d, a, num_samples, c=1):

    input_layer = Input(shape=(a*num_samples, 1))#d*d

    conv1d_layer0 = Conv1D(c*d, 25, strides=4, padding='same')(input_layer)#//2
    LReLU_layer0 = LeakyReLU(alpha=0.2)(conv1d_layer0)
    phaseshuffle_layer0 = Lambda(lambda x: _apply_phaseshuffle(x))(LReLU_layer0)

    c *= 2
    conv1d_layer1 = Conv1D(c*d, 25, strides=4, padding='same')(phaseshuffle_layer0)#d
    LReLU_layer1 = LeakyReLU(alpha=0.2)(conv1d_layer1)
    phaseshuffle_layer1 = Lambda(lambda x: _apply_phaseshuffle(x))(LReLU_layer1)

    c *= 2
    conv1d_layer2 = Conv1D(c*d, 25, strides=4, padding='same')(phaseshuffle_layer1)#2*d
    LReLU_layer2 = LeakyReLU(alpha=0.2)(conv1d_layer2)
    phaseshuffle_layer2 = Lambda(lambda x: _apply_phaseshuffle(x))(LReLU_layer2)

    c *= 2
    conv1d_layer3 = Conv1D(c*d, 25, strides=4, padding='same')(phaseshuffle_layer2)#4*d
    LReLU_layer3 = LeakyReLU(alpha=0.2)(conv1d_layer3)
    phaseshuffle_layer3 = Lambda(lambda x: _apply_phaseshuffle(x))(LReLU_layer3)

    c *= 2
    conv1d_layer4 = Conv1D(c*d, 25, strides=4, padding='same')(phaseshuffle_layer3)#8*d,strides=4
    LReLU_layer4 = LeakyReLU(alpha=0.2)(conv1d_layer4)
    phaseshuffle_layer4 = Lambda(lambda x: _apply_phaseshuffle(x))(LReLU_layer4)

    slice_layer0 = Lambda(lambda x: x[:, 0])(phaseshuffle_layer4)

    dense_layer1 = Dense(1, input_shape=(256*d,))(slice_layer0)

    model = Model(inputs=input_layer, outputs=dense_layer1)

    return model

0 Answers:

There are no answers.