Cartpole-v0 loss increasing using DQN

Time: 2019-07-10 06:35:23

Tags: python pytorch reinforcement-learning openai-gym

Hi, I'm trying to train a DQN to solve gym's CartPole problem. For some reason the loss looks like this (orange line). Could you take a look at my code and help? I've already played around with the hyperparameters a bit, so I don't think they are the problem here.

import random
import math
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
import gym


class DQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        self.linear1 = nn.Linear(input_dim, 16)
        self.linear2 = nn.Linear(16, 32)
        self.linear3 = nn.Linear(32, 32)
        self.linear4 = nn.Linear(32, output_dim)


    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.linear4(x)


final_epsilon = 0.05
initial_epsilon = 1
epsilon_decay = 5000
global steps_done
steps_done = 0


def select_action(state):
    global steps_done
    sample = random.random()
    eps_threshold = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
    if sample > eps_threshold:
        with torch.no_grad():
            state = torch.Tensor(state)
            steps_done += 1
            q_calc = model(state)
            node_activated = int(torch.argmax(q_calc))
            return node_activated
    else:
        node_activated = random.randint(0,1)
        steps_done += 1
        return node_activated


class ReplayMemory(object): # Stores [state, action, reward, next_state, done]

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = [[],[],[],[],[]]

    def push(self, data):
        """Saves a transition."""
        for idx, point in enumerate(data):
            #print("Col {} appended {}".format(idx, point))
            self.memory[idx].append(point)

    def sample(self, batch_size):
        rows = random.sample(range(0, len(self.memory[0])), batch_size)
        experiences = [[],[],[],[],[]]
        for row in rows:
            for col in range(5):
                experiences[col].append(self.memory[col][row])
        return experiences

    def __len__(self):
        return len(self.memory[0])


input_dim, output_dim = 4, 2
model = DQN(input_dim, output_dim)
target_net = DQN(input_dim, output_dim)
target_net.load_state_dict(model.state_dict())
target_net.eval()
tau = 2
discount = 0.99

learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

memory = ReplayMemory(65536)
BATCH_SIZE = 128


def optimize_model():
    if len(memory) < BATCH_SIZE:
        return 0
    experiences = memory.sample(BATCH_SIZE)
    state_batch = torch.Tensor(experiences[0])
    action_batch = torch.LongTensor(experiences[1]).unsqueeze(1)
    reward_batch = torch.Tensor(experiences[2])
    next_state_batch = torch.Tensor(experiences[3])
    done_batch = experiences[4]

    pred_q = model(state_batch).gather(1, action_batch)

    next_state_q_vals = torch.zeros(BATCH_SIZE)

    for idx, next_state in enumerate(next_state_batch):
        if done_batch[idx] == True:
            next_state_q_vals[idx] = -1
        else:
            # .max in pytorch returns (values, idx), we only want vals
            next_state_q_vals[idx] = (target_net(next_state_batch[idx]).max(0)[0]).detach()


    better_pred = (reward_batch + next_state_q_vals).unsqueeze(1)

    loss = F.smooth_l1_loss(pred_q, better_pred)
    optimizer.zero_grad()
    loss.backward()
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss


points = []
losspoints = []

#save_state = torch.load("models/DQN_target_11.pth")
#model.load_state_dict(save_state['state_dict'])
#optimizer.load_state_dict(save_state['optimizer'])



env = gym.make('CartPole-v0')
for i_episode in range(5000):
    observation = env.reset()
    episode_loss = 0
    if i_episode % tau == 0:
        target_net.load_state_dict(model.state_dict())
    for t in range(1000):
        #env.render()
        state = observation
        action = select_action(observation)
        observation, reward, done, _ = env.step(action)

        if done:
            next_state = [0,0,0,0]
        else:
            next_state = observation

        memory.push([state, action, reward, next_state, done])
        episode_loss = episode_loss + float(optimize_model())
        if done:
            points.append((i_episode, t+1))
            print("Episode {} finished after {} timesteps".format(i_episode, t+1))
            print("Avg Loss: ", episode_loss / (t+1))
            losspoints.append((i_episode, episode_loss / (t+1)))
            if (i_episode % 100 == 0):
                eps = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
                print(eps)
            if ((i_episode+1) % 5001 == 0):
                save = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
                torch.save(save, "models/DQN_target_" + str(i_episode // 5000) + ".pth")
            break
env.close()




x = [coord[0] * 100 for coord in points]
y = [coord[1] for coord in points]

x2 = [coord[0] * 100 for coord in losspoints]
y2 = [coord[1] for coord in losspoints]

plt.plot(x, y)
plt.plot(x2, y2)
plt.show()

Apart from using the state returned by the env instead of pixels, I basically followed the PyTorch tutorial. I also changed the replay memory because I was running into problems there. Other than that, I kept pretty much everything the same.

Edit:

I tried overfitting on a small batch; the loss looks like this without updating the target net, and like this when updating it.
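(For reference, a minimal sketch of this kind of small-batch overfitting check, reusing the ReplayMemory, model and optimize_model defined above; the tiny memory size, the random data-collection policy, and the iteration counts are illustrative assumptions rather than the exact setup:)

# Small-batch overfitting check (sketch): fill a tiny replay memory with one fixed
# batch of random-policy transitions, then repeatedly optimize on that same data.
# With the target net never refreshed, the regression targets are fixed, so the loss
# should shrink towards zero if the update itself is sound.
memory = ReplayMemory(BATCH_SIZE)   # overwrite the global memory with a tiny one
env = gym.make('CartPole-v0')
observation = env.reset()
while len(memory) < BATCH_SIZE:
    state = observation
    action = env.action_space.sample()
    observation, reward, done, _ = env.step(action)
    next_state = [0, 0, 0, 0] if done else observation
    memory.push([state, action, reward, next_state, done])
    if done:
        observation = env.reset()
env.close()

for step in range(2000):
    loss = optimize_model()
    if step % 200 == 0:
        print("step", step, "loss", float(loss))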

Edit 2:

It is definitely an issue with the target network. I tried removing it, and then the loss no longer seemed to grow exponentially.

1 answer:

Answer 0: (score: 0)

Your tau value is too small. Updating the target network too frequently makes DQN training unstable. You can try 1000 (OpenAI Baselines' DQN example) or 10000 (DeepMind's Nature paper).
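To make that concrete, one way to use such values is to synchronise the target network on a counter of optimisation steps rather than on the episode counter your loop uses now. A sketch only; the names target_update_steps, updates_done and maybe_sync_target are illustrative, not part of the original code:

# Sketch: copy the online weights into the target net every N gradient updates,
# in the spirit of the Baselines / Nature setups, instead of every couple of episodes.
target_update_steps = 10000   # e.g. 1000 or 10000
updates_done = 0

def maybe_sync_target():
    global updates_done
    updates_done += 1
    if updates_done % target_update_steps == 0:
        target_net.load_state_dict(model.state_dict())

# usage inside the training loop, right after each optimisation step:
#     episode_loss = episode_loss + float(optimize_model())
#     maybe_sync_target()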

In DeepMind's 2015 Nature paper, it states:

"The second modification to online Q-learning aimed at further improving the stability of our method with neural networks is to use a separate network for generating the targets y_j in the Q-learning update. More precisely, every C updates we clone the network Q to obtain a target network Q' and use Q' for generating the Q-learning targets y_j for the following C updates to Q. This modification makes the algorithm more stable compared to standard online Q-learning, where an update that increases Q(s_t, a_t) often also increases Q(s_{t+1}, a) for all a and hence also increases the target y_j, possibly leading to oscillations or divergence of the policy. Generating the targets using an older set of parameters adds a delay between the time an update to Q is made and the time the update affects the targets y_j, making divergence or oscillations much more unlikely."

Human-level control through deep reinforcement learning, Mnih et al., 2015
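Written out, the target described in the quote (using states s in place of the paper's preprocessed sequences, and θ⁻ for the cloned target-network parameters) is:

y_j =
\begin{cases}
  r_j & \text{if the episode terminates at step } j+1 \\
  r_j + \gamma \max_{a'} Q(s_{j+1}, a'; \theta^-) & \text{otherwise}
\end{cases}

Here θ⁻ is copied from the online parameters θ every C updates and held fixed in between, which is the role tau plays in the code above and below (counted in episodes rather than updates).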

I have run your code with the settings tau=2, tau=10, tau=100, tau=1000, and tau=10000. An update frequency of tau=100 solves the problem (the agent reaches the maximum of 200 steps).

tau=2: enter image description here

tau=10: enter image description here

tau=100: enter image description here

tau=1000: enter image description here

tau=10000: enter image description here

Below is the modified version of the code.

import random
import math
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
import gym


class DQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        self.linear1 = nn.Linear(input_dim, 16)
        self.linear2 = nn.Linear(16, 32)
        self.linear3 = nn.Linear(32, 32)
        self.linear4 = nn.Linear(32, output_dim)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.linear4(x)


final_epsilon = 0.05
initial_epsilon = 1
epsilon_decay = 5000
global steps_done
steps_done = 0


def select_action(state):
    global steps_done
    sample = random.random()
    eps_threshold = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
    if sample > eps_threshold:
        with torch.no_grad():
            state = torch.Tensor(state)
            steps_done += 1
            q_calc = model(state)
            node_activated = int(torch.argmax(q_calc))
            return node_activated
    else:
        node_activated = random.randint(0,1)
        steps_done += 1
        return node_activated


class ReplayMemory(object): # Stores [state, action, reward, next_state, done]

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = [[],[],[],[],[]]

    def push(self, data):
        """Saves a transition."""
        for idx, point in enumerate(data):
            #print("Col {} appended {}".format(idx, point))
            self.memory[idx].append(point)

    def sample(self, batch_size):
        rows = random.sample(range(0, len(self.memory[0])), batch_size)
        experiences = [[],[],[],[],[]]
        for row in rows:
            for col in range(5):
                experiences[col].append(self.memory[col][row])
        return experiences

    def __len__(self):
        return len(self.memory[0])


input_dim, output_dim = 4, 2
model = DQN(input_dim, output_dim)
target_net = DQN(input_dim, output_dim)
target_net.load_state_dict(model.state_dict())
target_net.eval()
tau = 100
discount = 0.99

learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

memory = ReplayMemory(65536)
BATCH_SIZE = 128


def optimize_model():
    if len(memory) < BATCH_SIZE:
        return 0
    experiences = memory.sample(BATCH_SIZE)
    state_batch = torch.Tensor(experiences[0])
    action_batch = torch.LongTensor(experiences[1]).unsqueeze(1)
    reward_batch = torch.Tensor(experiences[2])
    next_state_batch = torch.Tensor(experiences[3])
    done_batch = experiences[4]

    pred_q = model(state_batch).gather(1, action_batch)

    next_state_q_vals = torch.zeros(BATCH_SIZE)

    for idx, next_state in enumerate(next_state_batch):
        if done_batch[idx] == True:
            next_state_q_vals[idx] = -1
        else:
            # .max in pytorch returns (values, idx), we only want vals
            next_state_q_vals[idx] = (target_net(next_state_batch[idx]).max(0)[0]).detach()

    better_pred = (reward_batch + next_state_q_vals).unsqueeze(1)

    loss = F.smooth_l1_loss(pred_q, better_pred)
    optimizer.zero_grad()
    loss.backward()
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss


points = []
losspoints = []

#save_state = torch.load("models/DQN_target_11.pth")
#model.load_state_dict(save_state['state_dict'])
#optimizer.load_state_dict(save_state['optimizer'])


env = gym.make('CartPole-v0')
for i_episode in range(5000):
    observation = env.reset()
    episode_loss = 0
    if i_episode % tau == 0:
        target_net.load_state_dict(model.state_dict())
    for t in range(1000):
        #env.render()
        state = observation
        action = select_action(observation)
        observation, reward, done, _ = env.step(action)

        if done:
            next_state = [0,0,0,0]
        else:
            next_state = observation

        memory.push([state, action, reward, next_state, done])
        episode_loss = episode_loss + float(optimize_model())
        if done:
            points.append((i_episode, t+1))
            print("Episode {} finished after {} timesteps".format(i_episode, t+1))
            print("Avg Loss: ", episode_loss / (t+1))
            losspoints.append((i_episode, episode_loss / (t+1)))
            if (i_episode % 100 == 0):
                eps = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
                print(eps)
            if ((i_episode+1) % 5001 == 0):
                save = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
                torch.save(save, "models/DQN_target_" + str(i_episode // 5000) + ".pth")
            break
env.close()


x = [coord[0] * 100 for coord in points]
y = [coord[1] for coord in points]

x2 = [coord[0] * 100 for coord in losspoints]
y2 = [coord[1] for coord in losspoints]

plt.plot(x, y)
plt.plot(x2, y2)
plt.show()

Here is the result of your plotting code with tau=100:

enter image description here

Blue is the episode length and orange is the average loss per episode.