学习期间DQN颤抖

时间:2019-12-29 10:44:43

标签: tensorflow neural-network reinforcement-learning openai-gym dqn



我只是想继续在神经网络领域学习,并且目前正在基于小例子(cartpole gym-env)设计基于 的情景。我在huskarl套件和健身房环境中使用了tensorflow。
我创建了自己的环境。这是固定大小(10)的一维字段。其中一个上有一个苹果,另一个上有一个代理(启动时都是随机的)。他的目标是直接步行到苹果,而不要离开1D领域。他可能采取的行动是左右移动。如果他到达了苹果的田地,则将苹果放置在新的田地上(随机田地(!= agent-field))。代理商的自身位置和苹果的位置被用作输入。
不幸的是,NN并没有真正学习,但似乎一次又一次地学习正确的错误的步骤(请参阅step-reward graph)。我尝试了不同的NN层并根据距目标的距离进行了奖励,但不幸的是未成功。我有一种错误的感觉,那就是对代理人的奖励不正确,但是我不能肯定地说。有人暗示我该在哪里出错吗?

这是我的启动代码:

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

import matplotlib.pyplot as plt
import gym

import huskarl as hk
from OneD_Env import OneD_Env

if __name__ == '__main__':

    create_env = lambda: OneD_Env()
    dummy_env = create_env()

    model = Sequential([
        Dense(16, activation='relu', input_shape=dummy_env.observation_space.shape),
        Dense(16, activation='relu'),
        Dense(16, activation='relu')
    ])

    agent = hk.agent.DQN(model, actions=dummy_env.action_space.n, nsteps=2)

    def plot_rewards(episode_rewards, episode_steps, done=False):
        plt.clf()
        plt.xlabel('Step')
        plt.ylabel('Reward')
        for ed, steps in zip(episode_rewards, episode_steps):
            plt.plot(steps, ed)
        plt.show() if done else plt.pause(0.001) # Pause a bit so that the graph is updated


    sim = hk.Simulation(create_env, agent)
    sim.train(max_steps=10000, visualize=True, plot=plot_rewards)

和我的自定义环境

import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
import random

class OneD_Env(gym.Env):
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 50
    }

    def __init__(self):

        self.boardSize = 10
        self.board = np.zeros((self.boardSize), dtype=int)

        self.apple_pos_x = 0
        self.my_pos_x = 0
        self.stepCount = 0
        self.maxSteps = 0

        input_low = np.array([
            0,      # my position
            0       # apple position
            ])

        input_high = np.array([   
            self.boardSize-1,
            self.boardSize-1
            ])

        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Box(-input_high, input_high, dtype=np.int)

        self.viewer = None
        self.state = None
        self.steps_beyond_done = None

    def getDistance(self):
        a = self.my_pos_x
        b = self.apple_pos_x
        if a > b: return a - b
        else : return b - a

    def step(self, action):
        assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
        self.stepCount += 1

        if action == 0:     #right
            self.my_pos_x += 1
        else:   #left
            self.my_pos_x -= 1

        done = False

        staysOnApple = bool(self.my_pos_x == self.apple_pos_x)
        if staysOnApple:
            self.apple_pos_x = random.randint(0, self.boardSize-1)
            if self.my_pos_x == self.apple_pos_x:
                if self.apple_pos_x == self.boardSize-1:
                    self.apple_pos_x -= 1
                else:
                    self.apple_pos_x += 1
            self.stepCount = 0
            self.maxSteps = self.getDistance()
        else:
            if self.my_pos_x < 0 or self.my_pos_x >= self.boardSize or self.stepCount > self.maxSteps:
                done = True

        self.state = (self.my_pos_x, self.apple_pos_x)

        reward = 0.0 

        if not done:
            reward = 1.0
        elif self.steps_beyond_done is None:
            self.steps_beyond_done = 0
            reward = 1.0
        else:
            if self.steps_beyond_done == 0:
                logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
            self.steps_beyond_done += 1
            reward = 0.0

        return np.array(self.state), reward, done, {}

    def reset(self):

        self.apple_pos_x = random.randint(0, self.boardSize-1)
        self.my_pos_x = random.randint(0, self.boardSize-1)

        if self.my_pos_x == self.apple_pos_x:
            if self.apple_pos_x == self.boardSize-1:
                self.apple_pos_x -= 1
            else:
                self.apple_pos_x += 1
        self.maxSteps = self.getDistance()
        self.stepCount = 0
        self.steps_beyond_done = None

        self.state = (self.my_pos_x, self.apple_pos_x)
        return np.array(self.state)

    def render(self, mode='human'):
        screen_width = 700
        screen_height = 700
        size = 60

        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)

            for x in range(self.boardSize):
                poly = rendering.FilledPolygon([
                    (50 + (x * size -28), 50 + ( +28)),  
                    (50 + (x * size -28), 50 + ( -28)), 
                    (50 + (x * size +28), 50 + ( -28)),
                    (50 + (x * size +28), 50 + ( +28))
                ])
                poly.set_color(.9, .9, .9)
                self.viewer.add_geom(poly)

            my = rendering.FilledPolygon([
                (-5, 5),  
                (-5, -5), 
                (5, -5),
                (5, 5)
            ])

            apple = rendering.FilledPolygon([
                (-5, 5),  
                (-5, -5), 
                (5, -5),
                (5, 5)
            ])

            my.set_color(.1, .1, .1)
            apple.set_color(.8, .2, .2)

            self.myTrans = rendering.Transform()
            my.add_attr(self.myTrans)

            self.appleTrans = rendering.Transform()
            apple.add_attr(self.appleTrans)

            self.viewer.add_geom(my)
            self.viewer.add_geom(apple)

        if self.state is None: return None

        self.myTrans.set_translation(50 + self.my_pos_x * size, 50)
        self.appleTrans.set_translation(50 + self.apple_pos_x * size, 50)

        return self.viewer.render(return_rgb_array = mode=='rgb_array')

    def close(self):
        if self.viewer:
            self.viewer.close()
            self.viewer = None

0 个答案:

没有答案