Understanding the Policy Gradient derivation

Date: 2019-10-28 02:06:23

Tags: python machine-learning math deep-learning reinforcement-learning

I am trying to recreate a very simple Policy Gradient example from the original source, the Andrej Karpathy blog. In that post there is a CartPole example with a Policy Gradient, a weight matrix, and a Softmax activation. Here is my recreation of that very simple CartPole Policy Gradient example, and it works perfectly.

import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
import copy

NUM_EPISODES = 4000
LEARNING_RATE = 0.000025
GAMMA = 0.99


# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.rand(5, 2)

    def policy(self, state):
        z = state.dot(self.w)
        exp = np.exp(z)
        return exp/np.sum(exp)

    def __softmax_grad(self, softmax):
        s = softmax.reshape(-1,1)
        return np.diagflat(s) - np.dot(s, s.T)

    def grad(self, probs, action, state):
        dsoftmax = self.__softmax_grad(probs)[action,:]
        dlog = dsoftmax / probs[0,action]
        grad = state.T.dot(dlog[None,:])
        return grad

    def update_with(self, grads, rewards):

        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward

            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect
            print("Grads update: " + str(np.sum(grads[i])))



def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)

    agent = Agent()
    complete_scores = []

    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)

        rewards = []
        grads = []
        score = 0

        while True:

            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=probs[0])

            next_state, reward, done,_ = env.step(action)
            next_state = next_state[None,:]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))
            grad = agent.grad(probs, action, state)

            grads.append(grad)
            rewards.append(reward)

            score += reward
            state = next_state

            if done:
                break

        agent.update_with(grads, rewards)
        complete_scores.append(score)

    env.close()
    plt.plot(np.arange(NUM_EPISODES),
             complete_scores)
    plt.savefig('image1.png')


if __name__ == '__main__':
    main(None)


Question

I am trying to build almost the same example, but with a Sigmoid activation (purely to simplify things). Switching the model's activation from Softmax to Sigmoid should work, based on the explanations in the references below. Yet my Policy Gradient model learns nothing and stays random. Any suggestions?
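For context, the reason I expect the switch to be possible at all: a two-class Softmax over the logits [z, 0] assigns exactly sigmoid(z) to the first action, so a single Sigmoid output can represent the same Bernoulli policy. A minimal, standalone check of that identity (illustrative only, not part of the agent):

import numpy as np

def softmax(z):
    # numerically stable softmax over a 1-D array of logits
    e = np.exp(z - np.max(z))
    return e / e.sum()

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = 0.7  # an arbitrary logit
print(softmax(np.array([z, 0.0]))[0])  # probability of the first action under the softmax
print(sigmoid(z))                      # the same probability from a single sigmoid

With that in mind, here is the Sigmoid version of the program: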

import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures

NUM_EPISODES = 4000
LEARNING_RATE = 0.000025
GAMMA = 0.99


# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.rand(5, 1) - 0.5

    # Our policy that maps state to action parameterized by w
    # noinspection PyShadowingNames
    def policy(self, state):
        z = np.sum(state.dot(self.w))
        return self.sigmoid(z)

    def sigmoid(self, x):
        s = 1 / (1 + np.exp(-x))
        return s

    def sigmoid_grad(self, sig_x):
        return sig_x * (1 - sig_x)

    def grad(self, probs, action, state):
        dsoftmax = self.sigmoid_grad(probs)
        dlog = dsoftmax / probs
        grad = state.T.dot(dlog)
        grad = grad.reshape(5, 1)
        return grad

    def update_with(self, grads, rewards):
        if len(grads) < 50:
            return
        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward

            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect


def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)

    agent = Agent()
    complete_scores = []

    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)

        rewards = []
        grads = []
        score = 0

        while True:

            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=[1 - probs, probs])

            next_state, reward, done, _ = env.step(action)
            next_state = next_state[None, :]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))

            grad = agent.grad(probs, action, state)
            grads.append(grad)
            rewards.append(reward)

            score += reward
            state = next_state

            if done:
                break

        agent.update_with(grads, rewards)
        complete_scores.append(score)

    env.close()
    plt.plot(np.arange(NUM_EPISODES),
             complete_scores)
    plt.savefig('image1.png')


if __name__ == '__main__':
    main(None)

Whatever the agent learns, the plot of scores stays random. Tuning the hyperparameters does not help at all. A sample plot is shown below.

(plot: episode scores remain essentially random across training)

References

1) Deep Reinforcement Learning: Pong from Pixels

2) An introduction to Policy Gradients with Cartpole and Doom

3) Deriving Policy Gradients and Implementing REINFORCE

4) Machine Learning Trick of the Day (5): Log Derivative Trick


Update

The answer below does seem to achieve something, judging by its plot. But that is not the log probability, and not even the gradient of the policy, and it defeats the whole purpose of the policy gradient in RL. Please check the references above.

What I need is the gradient of the log of the policy (which here is simply the weights followed by a Sigmoid activation).

1 Answer:

Answer 0 (score: 7)

The problem is in the grad method.

def grad(self, probs, action, state):
    dsoftmax = self.sigmoid_grad(probs)
    dlog = dsoftmax / probs
    grad = state.T.dot(dlog)
    grad = grad.reshape(5, 1)
    return grad

In the original code, Softmax was used together with the CrossEntropy loss function. Once you switch the activation to Sigmoid, the proper loss function becomes Binary CrossEntropy. The purpose of the grad method is to compute the gradient of that loss function with respect to the weights. Sparing the details, the proper gradient is given by (probs - action) * state in your program's terms. The last thing is to add a minus sign: we want to maximize the negative of the loss function.

The correct grad method looks like this:

def grad(self, probs, action, state):
    grad = state.T.dot(probs - action)
    return -grad
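Why this is indeed the gradient of the log of the policy: for a Bernoulli policy with p = sigmoid(state.dot(w)), we have log pi(a|s) = a*log(p) + (1 - a)*log(1 - p), and its gradient with respect to w works out to (a - p) * state, which is exactly the value -(probs - action) * state returned by the corrected method. Below is a quick finite-difference check of that claim (an illustrative sketch only, separate from the agent code):

import numpy as np

# Illustrative check: for p = sigmoid(state.dot(w)), the analytic expression
# (action - p) * state.T (what the corrected grad() above returns) should match
# a finite-difference gradient of log pi(action | state) with respect to w.

def log_pi(w, state, action):
    p = 1.0 / (1.0 + np.exp(-np.sum(state.dot(w))))
    return np.log(p) if action == 1 else np.log(1.0 - p)

rng = np.random.RandomState(0)
state = rng.randn(1, 5)        # a made-up feature row of the same shape the agent uses
w = rng.randn(5, 1) * 0.01
action = 1

p = 1.0 / (1.0 + np.exp(-np.sum(state.dot(w))))
analytic = (action - p) * state.T

eps = 1e-6
numeric = np.zeros_like(w)
for i in range(w.shape[0]):
    w_plus, w_minus = w.copy(), w.copy()
    w_plus[i, 0] += eps
    w_minus[i, 0] -= eps
    numeric[i, 0] = (log_pi(w_plus, state, action) - log_pi(w_minus, state, action)) / (2 * eps)

print(np.allclose(analytic, numeric, atol=1e-6))  # expected: True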

Another change you may want to make is to increase the learning rate. LEARNING_RATE = 0.0001 and NUM_EPISODES = 5000 will produce the following curve:

(plot: correct mean reward vs. number of episodes)

Convergence will be faster if you initialize the weights from a Gaussian distribution with zero mean and a small variance:

def __init__(self):
    self.poly = PolynomialFeatures(1)
    self.w = np.random.randn(5, 1) * 0.01

(plot: the faster-converging reward curve with Gaussian weight initialization)

Update

Here is the complete code to reproduce the results:

import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures

NUM_EPISODES = 5000
LEARNING_RATE = 0.0001
GAMMA = 0.99


# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.randn(5, 1) * 0.01

    # Our policy that maps state to action parameterized by w
    # noinspection PyShadowingNames
    def policy(self, state):
        z = np.sum(state.dot(self.w))
        return self.sigmoid(z)

    def sigmoid(self, x):
        s = 1 / (1 + np.exp(-x))
        return s

    def sigmoid_grad(self, sig_x):
        return sig_x * (1 - sig_x)

    def grad(self, probs, action, state):
        grad = state.T.dot(probs - action)
        return -grad

    def update_with(self, grads, rewards):
        if len(grads) < 50:
            return
        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward

            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect


def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)

    agent = Agent()
    complete_scores = []

    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)

        rewards = []
        grads = []
        score = 0

        while True:

            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=[1 - probs, probs])

            next_state, reward, done, _ = env.step(action)
            next_state = next_state[None, :]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))

            grad = agent.grad(probs, action, state)
            grads.append(grad)
            rewards.append(reward)

            score += reward
            state = next_state

            if done:
                break

        agent.update_with(grads, rewards)
        complete_scores.append(score)

    env.close()
    plt.plot(np.arange(NUM_EPISODES),
             complete_scores)
    plt.savefig('image1.png')


if __name__ == '__main__':
    main(None)