ValueError: No gradients provided for any variable (TensorFlow 2.0)

Asked: 2019-04-24 12:19:57

Tags: tensorflow

I am using TensorFlow 2.0 and trying to get an actor-critic algorithm to play the CartPole game. As far as I can tell everything is set up correctly, but I get the following error: ValueError: No gradients provided for any variable: ['dense/kernel:0', 'dense/bias:0', 'dense_1/kernel:0', 'dense_1/bias:0']. Please help me.

Here is my code:

import gym
import tensorflow as tf 
import numpy as np 
import matplotlib.pyplot as plt 

MAX_EPISODES = 2000
GAMMA = 0.9
LR_A = 0.001
LR_C = 0.01
env = gym.make("CartPole-v0")
N_ACTIONS = env.action_space.n 
N_FEATURES = 4 

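# Actor network: softmax policy over the discrete actions.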
def make_actor(n_features, n_actions):
    inputs = tf.keras.Input(shape=[n_features])
    hidden = tf.keras.layers.Dense(20, activation=tf.nn.relu)(inputs)
    dist = tf.keras.layers.Dense(n_actions, activation=tf.nn.softmax)(hidden)
    model = tf.keras.Model(inputs=inputs, outputs=dist)
    return model

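# Critic network: scalar state-value estimate V(s).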
def make_critic(n_features):
    inputs = tf.keras.Input(shape=[n_features])
    hidden = tf.keras.layers.Dense(20, activation=tf.nn.relu)(inputs)
    value = tf.keras.layers.Dense(1)(hidden)
    model = tf.keras.Model(inputs=inputs, outputs=value)
    return model

actor = make_actor(N_FEATURES, N_ACTIONS)
critic = make_critic(N_FEATURES)
actor.summary()
critic.summary()
actor_optimizer = tf.keras.optimizers.Adam(LR_A)
critic_optimizer = tf.keras.optimizers.Adam(LR_C)

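# Actor loss: -log pi(a|s) * td_error (policy gradient, TD error as advantage).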
def loss_actor(s, a, td_error):
    dist = actor(s.reshape(1, 4)).numpy()
    log_prob = np.log(dist[0, a])
    exp_v = np.mean(log_prob * td_error)
    return tf.multiply(exp_v, -1)

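# Critic loss: the TD error r + gamma * V(s') - V(s).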
def loss_critic(s, s_, r, gamma):
    s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
    v = critic(s)
    v_ = critic(s_)
    td_error = r + gamma * v_ - v 
    return tf.multiply(td_error, 1)

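# Interact with the environment and update both networks at every step.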
def train(max_episodes):
    for episode in range(max_episodes):
        s = env.reset().astype(np.float32)
        t = 0
        track_r = []
        while True:
            dist = actor(s.reshape(1, 4)).numpy()
            a = np.random.choice(range(N_ACTIONS), p=dist.ravel())
            s_, r, done, info = env.step(a)
            s_ = s_.astype(np.float32)
            if done: r = -20
            track_r.append(r)

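            # Critic update: compute the TD error inside the tape.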
            with tf.GradientTape() as cri_tape, tf.GradientTape() as act_tape:
                td_error = loss_critic(s, s_, r, GAMMA)
            gradient = cri_tape.gradient(td_error, critic.trainable_variables)
            critic_optimizer.apply_gradients(zip(gradient,critic.trainable_variables))

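            # Actor update: this is where apply_gradients raises the ValueError.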
            with tf.GradientTape() as act_tape:
                neg_exp_v = loss_actor(s, a, td_error.numpy())
            gradient = act_tape.gradient(neg_exp_v, critic.trainable_variables)
            actor_optimizer.apply_gradients(zip(gradient, actor.trainable_variables))

            s = s_
            t += 1
            if done:
                print("Episode:{}   Steps:{}".format(episode+1, t))
                break

train(MAX_EPISODES)

The error is raised at the line actor_optimizer.apply_gradients(zip(gradient, actor.trainable_variables)). When I try to print the actor's gradients, the result is None. I really can't see where the problem is.
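Update: after some digging, I suspect the tape never records a path back to the actor's weights, because loss_actor immediately converts the model output to NumPy (.numpy() and np.log), which stops TensorFlow's tracing; the gradient is also taken with respect to critic.trainable_variables rather than the actor's. Here is an untested sketch of how I think the actor step could stay inside the tape, using only TF ops and the same names as the code above:

with tf.GradientTape() as act_tape:
    dist = actor(s.reshape(1, 4))             # keep the output as a tf.Tensor
    log_prob = tf.math.log(dist[0, a])        # log pi(a|s), traced by the tape
    neg_exp_v = -log_prob * td_error.numpy()  # TD error used as a constant weight
gradient = act_tape.gradient(neg_exp_v, actor.trainable_variables)
actor_optimizer.apply_gradients(zip(gradient, actor.trainable_variables))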

0 Answers