Semi-supervised GAN: checking gradients and training steps

Time: 2021-07-12 13:54:09

Tags: tensorflow keras generative-adversarial-network

I am developing code for a semi-supervised GAN using TensorFlow Keras, but I am somewhat unsure about the training loop I wrote for it and cannot tell whether it is correct.

import random

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Activation

for epoch in range(n_epochs):

    # Same split every epoch; `section` itself is unused below, batches are drawn by sampling
    train_split = np.array_split(np.arange(train_array), split_size)

    print(noise_coeff)
    for step, section in enumerate(train_split):

        # Labeled batch for the supervised classification loss
        supervised_samples = select_supervised_audio_samples(supervised_data, n_samples=n_batch)
        np.random.shuffle(supervised_samples)
        x_sup, y_sup = feature_setting(np.arange(n_batch), supervised_samples)

        # Unlabeled real batch for the discriminator loss, with one-sided label smoothing
        random_indexes = random.sample(range(0, len(unsupervised_data)), n_batch)
        x_unsup_real, _ = feature_setting(random_indexes, unsupervised_data, encode_labels=False)
        y_unsup_real = smooth_positive_labels(np.ones(n_batch))

        # Classifier/discriminator pass: both losses recorded on one tape
        z_input, _ = generate_latent_points(latent_dim, n_batch)
        with tf.GradientTape() as tape:
            c_model_pred = c_model(x_sup, training=True)[0]
            c_loss_value = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
                y_true=y_sup, y_pred=c_model_pred, from_logits=True))

            first_real_pred = c_model(x_unsup_real, training=True)
            synt_data = g_model(z_input, training=True)
            second_fake_pred = c_model(synt_data, training=True)

            d_real = 0.5 * tf.reduce_mean(tf.keras.losses.binary_crossentropy(
                y_true=y_unsup_real, y_pred=d_output(first_real_pred[0]), from_logits=False))
            d_fake = 0.5 * tf.reduce_mean(tf.keras.losses.binary_crossentropy(
                y_true=np.zeros((n_batch,)), y_pred=d_output(second_fake_pred[0]), from_logits=False))

            d_loss_value = d_real + d_fake

        # Supervised accuracy on this batch
        preds = np.argmax(Activation("softmax")(c_model_pred), axis=1).reshape(-1, 1)
        acc_object.update_state(y_sup.reshape(-1, 1), preds)

        # Gradients of the summed supervised + unsupervised losses w.r.t. c_model
        grads = tape.gradient([c_loss_value, d_loss_value], c_model.trainable_weights)

        train_history_loss_buffer.append(c_loss_value)
        unsup_real_loss_buffer.append(d_loss_value)
        total_loss_buffer.append(c_loss_value + d_loss_value)

        # Intermediate-layer features of the real batch (output index 1 of c_model)
        real_activations = c_model(x_unsup_real, training=False)[1]

        # Generator pass: fool the discriminator on noise-perturbed synthetic samples
        with tf.GradientTape() as gen_tape:
            synt_data_class = g_model(z_input, training=True)
            g_activations = c_model(
                tf.clip_by_value(
                    synt_data_class + np.random.normal(0, noise_coeff, synt_data_class.shape),
                    -1, 1),
                training=True)
            # Note: these feature means are computed but never enter g_loss_value
            real_activations_mean = tf.reduce_mean(real_activations, axis=0)
            g_activations_mean = tf.reduce_mean(g_activations[1], axis=0)

            g_loss_value = tf.reduce_mean(tf.keras.losses.binary_crossentropy(
                y_true=np.ones((n_batch,)), y_pred=d_output(g_activations[0]), from_logits=False))

        g_grads = gen_tape.gradient(g_loss_value, g_model.trainable_weights)

        gen_loss_buffer.append(g_loss_value)

        d_opt.apply_gradients(zip(grads, c_model.trainable_weights))
        g_opt.apply_gradients(zip(g_grads, g_model.trainable_weights))
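
Not from the original post, but since the title asks about checking gradients: a minimal sanity check, assuming the grads, g_grads, c_model, and g_model from the loop above, is to confirm that no gradient comes back as None and that every norm is finite before applying the updates:

# Hypothetical debugging helper (not in the original code): call it right after
# each tape.gradient(...) to catch disconnected or non-finite gradients.
def check_grads(grads, variables, tag=""):
    for g, v in zip(grads, variables):
        if g is None:
            print(f"[{tag}] no gradient for {v.name} (disconnected from the loss?)")
        elif not np.isfinite(tf.norm(g).numpy()):
            print(f"[{tag}] non-finite gradient norm for {v.name}")

check_grads(grads, c_model.trainable_weights, tag="c_model")
check_grads(g_grads, g_model.trainable_weights, tag="g_model")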

c_model, which acts as both the classifier and the discriminator, outputs the intermediate-layer features (possibly for a feature-matching loss) as well as the final logits. The d_output function outputs the logsumexp of the logits divided by the same logsumexp + 1, as the probability that the provided sample belongs to one of the real classes. Can anyone help me track down potential bugs?
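
For reference, that description matches the discriminator output from Salimans et al.'s "Improved Techniques for Training GANs": D(x) = Z(x) / (Z(x) + 1) with Z(x) = sum_k exp(l_k(x)) = exp(logsumexp(l(x))). Assuming that is what d_output implements (a sketch of my reading, not code from the post), the numerically stable form is a sigmoid of the logsumexp:

def d_output(logits):
    # D(x) = Z / (Z + 1) with Z = sum_k exp(logit_k) = exp(logsumexp(logits));
    # algebraically this equals sigmoid(logsumexp(logits)).
    return tf.sigmoid(tf.reduce_logsumexp(logits, axis=-1))

As for the feature-matching mention: real_activations_mean and g_activations_mean are computed inside the generator tape but never used in g_loss_value. If feature matching was intended, the usual term is a distance between those batch means, e.g. this hypothetical addition (not in the post):

# Hypothetical feature-matching term: penalize the gap between the mean real
# and mean generated activations, added to the generator loss inside gen_tape.
fm_loss = tf.reduce_mean(tf.square(real_activations_mean - g_activations_mean))
g_loss_value = g_loss_value + fm_loss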

0 Answers:

No answers