Error: ValueError: too many values to unpack (expected 5)

Date: 2018-10-23 13:38:51

Tags: python python-3.x openai-gym

import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import os


env = gym.make('CartPole-v0')
state_size = env.observation_space.shape[0]

action_size = env.action_space.n


batch_size = 32

n_episodes = 1000

output_dir = 'model_output/cartpole'

if not os.path.exists(output_dir):
     os.makedirs(output_dir)


class DQNAgent:
     def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size

        self.memory = deque(maxlen=2000)

        self.gamma = 0.9
        self.epsilon = 1.0
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.05

        self._learning_rate = 0.01

        self.model = self._build_model()

     def _build_model(self):

         model = Sequential()

         model.add(Dense(24, input_dim = self.state_size, activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(24,activation='relu'))
         model.add(Dense(50,activation='relu'))

         model.add(Dense(self.action_size, activation='sigmoid'))
         model.compile(loss='mse', optimizer=Adam(lr=self._learning_rate))

         return model


     def remember(self, state, action, reward, next_state, done):
        self.memory.append((self, state, action, reward, next_state, done))

     def act(self, state):
        if np.random.rand() <= self.epsilon:
           return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

     def replay(self, batch_size):

         minibatch = random.sample(self.memory, batch_size)
         print(len(minibatch))
         for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = (reward + self.gamma*np.amax(self.model.predict(next_state)[0]))

            target_f = self.model.predict(state)
            target_f[0][action] = target

            self.model.fit(state, target_f, epochs=1, verbose=0)

            if self.epsilon > self.epsilon_min:
               self.epsilon *= self.epsilon_decay

     def load(self,name):
         self.model.load_weights(name)

     def save(self, name):
          self.model.save_weights(name)



agent = DQNAgent(state_size, action_size)

done = False

for e in range(n_episodes):
     state = env.reset()
     state = np.reshape(state, [1, state_size])
     if agent.epsilon > agent.epsilon_min:
        agent.epsilon *= agent.epsilon_decay

     for time in range(5000):

         # env.render()
          action = agent.act(state)

          next_state, reward, done,  _ = env.step(action)

          reward = reward if not done else -10

          next_state = np.reshape(next_state, [1, state_size])

          agent.remember(state, action, reward, next_state, done)

          state = next_state

          if done:
             print("episode: {}/{}, score: {}, e: {:.2}".format(e, n_episodes, time, agent.epsilon))
             break

     if len(agent.memory) > batch_size:

        agent.replay(batch_size)

     if e % 50 == 0:
        agent.save(output_dir + "weights_" + '{:04d}'.format(e) + ".hdf5")

I am writing an algorithm for the CartPole environment in OpenAI Gym, but I am getting this error:

Traceback (most recent call last):
  File "C:/Users/ardao/Desktop/Ardaficial Intelligence/DQNs/CartPole.py", line 145, in <module>
    agent.replay(batch_size)
  File "C:/Users/ardao/Desktop/Ardaficial Intelligence/DQNs/CartPole.py", line 93, in replay
    for state, action, reward, next_state, done in minibatch:
ValueError: too many values to unpack (expected 5)

I am following this tutorial: https://www.youtube.com/watch?v=OYhFoMySoVs&t=2444s

Thanks,

Arda

1 Answer:

Answer 0 (score: 0)

You have simply added an extra self. Removing it should fix the problem. If you think about it, the error message explains itself:

too many values to unpack (expected 5)

On that line you can see that you have six. Checking against the code in the YouTube tutorial will show the same thing. Slips like this are easy to miss when you are starting out, though. Good luck, and I encourage you to take a breath and read through the code more slowly next time; you might well solve it yourself.

 self.memory.append((state, action, reward, next_state, done))
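
As a minimal standalone sketch (not from the original post; the state and action values below are made-up placeholders), this shows why the six-element tuple breaks the five-name unpacking in replay, and that the corrected remember line stores exactly the five values the loop expects:

from collections import deque

memory = deque(maxlen=2000)

# Buggy version: SIX items per entry because of the extra leading `self`.
memory.append(("agent_object", [0.1, 0.2, 0.3, 0.4], 1, 1.0, [0.1, 0.2, 0.3, 0.5], False))

try:
    # replay() unpacks each entry into FIVE names, so this raises ValueError.
    for state, action, reward, next_state, done in memory:
        pass
except ValueError as err:
    print(err)  # too many values to unpack (expected 5)

# Corrected version: exactly five items per entry, so the unpacking works.
memory.clear()
memory.append(([0.1, 0.2, 0.3, 0.4], 1, 1.0, [0.1, 0.2, 0.3, 0.5], False))
for state, action, reward, next_state, done in memory:
    print(action, reward, done)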