MLAgents代理在训练方面并没有进步

时间:2019-12-16 18:03:40

标签: c# unity3d machine-learning artificial-intelligence ml-agent

我最近一直在研究自动平衡的双腿,该双腿应该试图防止角色摔倒。每次执行AgentReset时,腿都会重置其所有必要的状态(例如位置、旋转和速度),并且角色下方的地板会被随机旋转不超过5度。然而,无论我给代理添加多少观察值,它似乎都没有从错误中学习。我是机器学习的新手,所以请多包涵!我漏掉了什么?谢谢!

一些笔记: 我不太确定RayPerceptionSensorComponent3D的工作方式。如果有帮助的话,也许有人可以使我朝正确的方向前进。

(图:Inspector 中正在训练的代理;TensorBoard 训练数据曲线)




代理脚本:

using MLAgents;
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using MLAgents.Sensor;
using Random = UnityEngine.Random;

public class BalanceAgent : Agent
{
    private BalancingArea area;
    public GameObject floor;
    public GameObject waist;
    public GameObject buttR;
    public GameObject buttL;
    public GameObject thighR;
    public GameObject thighL;
    public GameObject legR;
    public GameObject legL;
    public GameObject footR;
    public GameObject footL;

    public GameObject[] bodyParts = new GameObject[9];
    public HingeJoint[] hingeParts = new HingeJoint[9];
    public JointLimits[] jntLimParts = new JointLimits[9];

    public Vector3[] posStart = new Vector3[9];
    public Vector3[] eulerStart = new Vector3[9];

    public RayPerceptionSensorComponent3D raySensors;

    // Cached rigidbodies so we don't call GetComponent every physics step.
    private Rigidbody[] rigidbodies = new Rigidbody[9];

    // Latest downward-raycast readings taken from the waist. Refreshed in
    // AgentAction, consumed as observations in CollectObservations.
    float rayDist = 0;
    float rayAngle = 0;
    Vector3 rayFloorAngle = new Vector3(0, 0, 0);
    Vector3 rayPoint = new Vector3(0, 0, 0);

    int rotAgent = 0;

    /// <summary>
    /// Captures the starting pose of every body part and caches its
    /// Rigidbody and (optional) HingeJoint so resets/actions are cheap.
    /// </summary>
    public void Start() {
        bodyParts = new GameObject[] { waist, buttR, buttL, thighR, thighL, legR, legL, footR, footL };             //Waist = 0, footL = 8.

        for (int i = 0; i < bodyParts.Length; i++) {
            posStart[i] = bodyParts[i].transform.position;
            eulerStart[i] = bodyParts[i].transform.eulerAngles;
            rigidbodies[i] = bodyParts[i].GetComponent<Rigidbody>();

            // Call GetComponent once instead of twice; the waist has no hinge.
            HingeJoint hinge = bodyParts[i].GetComponent<HingeJoint>();
            if (hinge != null) {
                hingeParts[i] = hinge;
                jntLimParts[i] = hinge.limits;
            }
        }
    }

    public override void InitializeAgent() {
        base.InitializeAgent();
        // The area this agent trains in (parent object in the hierarchy).
        area = GetComponentInParent<BalancingArea>();
    }

    /// <summary>
    /// Resets the character at the start of each episode: random floor tilt,
    /// original poses, zeroed velocities, and neutral joint limits.
    /// </summary>
    public override void AgentReset() {
        // Note: the int overload of Random.Range excludes the upper bound,
        // so the tilt is actually in [-5, 4] degrees per axis.
        floor.transform.eulerAngles = new Vector3(Random.Range(-5, 5), 0, Random.Range(-5, 5));
        print("Reset! - " + rotAgent);
        for (int i = 0; i < bodyParts.Length; i++) {
            bodyParts[i].transform.position = posStart[i];
            bodyParts[i].transform.eulerAngles = eulerStart[i];
            if (hingeParts[i] != null) {
                jntLimParts[i].max = 1;
                jntLimParts[i].min = -1;
                // BUG FIX: JointLimits is a struct, so mutating the cached copy
                // alone never reached the joint. Write the reset limits back.
                hingeParts[i].limits = jntLimParts[i];
            }
            rigidbodies[i].velocity = Vector3.zero;
            rigidbodies[i].angularVelocity = Vector3.zero;
        }
    }

    // Maps a discrete action value to a hinge-limit delta:
    // 1 (or anything else) = hold, 2 = +0.2, 3 = -0.2.
    private static float ActionToDelta(int action) {
        switch (action) {
            case 2: return .2f;
            case 3: return -.2f;
            default: return 0f;
        }
    }

    // Slides the 1-degree-wide hinge-limit window of body part i by the delta
    // encoded in the discrete action, and pushes the new limits to the joint.
    private void ApplyJointAction(int i, int action) {
        jntLimParts[i].max += ActionToDelta(action);
        jntLimParts[i].min = jntLimParts[i].max - 1;
        hingeParts[i].limits = jntLimParts[i];
    }

    /// <summary>
    /// Applies the nine discrete action branches, hands out the dense
    /// balance reward, ends the episode on a fall, and refreshes the
    /// downward raycast used as an observation.
    /// </summary>
    public override void AgentAction(float[] vectorAction) {
        // Branches 0..7 drive joints 1..8 (buttR, buttL, thighR, thighL,
        // legR, legL, footR, footL); the waist (index 0) has no hinge.
        for (int branch = 0; branch < 8; branch++) {
            ApplyJointAction(branch + 1, (int)vectorAction[branch]);
        }

        // BUG FIX: the waist branch originally switched on footLVec (copy/paste
        // error), so vectorAction[8] was never read. Decode it correctly; the
        // rotation itself stays disabled, exactly as in the original.
        float waistDir = ActionToDelta((int)vectorAction[8]);
        // waist.transform.Rotate(0, waistDir, 0);

        // Dense reward: small bonus while the waist stays up, small penalty
        // once it drops below the threshold.
        if (waist.transform.position.y > -1.4f) {
            AddReward(.02f);
        }
        else {
            AddReward(-.03f);
        }

        // End the episode once the character has clearly fallen.
        if (waist.transform.position.y <= -3) {
            Done();
            print("He fell too far...");
        }

        // Cast straight down (in waist-local space) to sample the floor.
        RaycastHit hit;
        if (Physics.Raycast(waist.transform.position, -waist.transform.up, out hit)) {
            rayDist = hit.distance;
            rayPoint = hit.point;
            // NOTE(review): Vector3.Angle(position, normal) compares a world
            // POSITION with the surface normal; waist.transform.up was probably
            // intended here — confirm before relying on this observation.
            rayAngle = Vector3.Angle(waist.transform.position, hit.normal);
            rayFloorAngle = hit.collider.transform.eulerAngles;
        }
    }

    /// <summary>
    /// Collects the vector observations: per-part pose, velocities and joint
    /// limits, plus one copy of the floor-raycast readings.
    /// NOTE: this changes the observation count vs. the original — update the
    /// Behavior/Brain "Space Size" in the Inspector to match.
    /// </summary>
    public override void CollectObservations() {

        for (int i = 0; i < bodyParts.Length; i++) {
            AddVectorObs(bodyParts[i].transform.position);
            AddVectorObs(bodyParts[i].transform.eulerAngles);
            AddVectorObs(rigidbodies[i].velocity);
            AddVectorObs(rigidbodies[i].angularVelocity);
            AddVectorObs(jntLimParts[i].max);
            AddVectorObs(jntLimParts[i].min);
        }

        // BUG FIX: these four readings were inside the loop, duplicating the
        // identical values nine times; observe them once. The
        // RayPerceptionSensorComponent3D is no longer passed to AddVectorObs —
        // it reports its observations to the trainer by itself and is not a
        // valid vector observation.
        AddVectorObs(rayDist);
        AddVectorObs(rayPoint);
        AddVectorObs(rayAngle);
        AddVectorObs(rayFloorAngle);
    }
}




区域脚本:

using MLAgents;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using UnityEngine;

public class BalancingArea : Area
{
    // All BalanceAgents that train inside this area (filled in Awake).
    public List<BalanceAgent> BalanceAgent { get; private set; }
    // The scene-wide academy instance.
    public BalanceAcademy BalanceAcademy { get; private set; }
    public GameObject area;

    private void Awake() {
        BalanceAgent = transform.GetComponentsInChildren<BalanceAgent>().ToList();              //Grabs all agents in area
        BalanceAcademy = FindObjectOfType<BalanceAcademy>();                //Grabs balance academy
    }

    // Intentionally a no-op: the agent resets its own body parts in
    // BalanceAgent.AgentReset, so the area does not move it.
    public void ResetAgentPosition(BalanceAgent agent) {
    }

    // NOTE: the empty Start() and Update() stubs were removed — Unity still
    // invokes empty MonoBehaviour messages every frame, which costs time
    // for no behavior.
}




学院脚本:

using MLAgents;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

// Empty by design: the base Academy class drives global training state;
// no environment-wide parameters are customized for this scene.
public class BalanceAcademy : Academy
{

}

5 个答案:

答案 0 :(得分:0)

我认为人们很难快速遵循您的代码。强化学习(rf)有几个主要问题:

  • 您使用哪种模型来存储您的学习成果?流行的是q表,还有使用q学习的深层神经网络
  • 您的奖励功能是什么,您会使用任何惩罚吗?

对我而言,什么是成功的检查和负面奖励的应用,是对我进行哪些检查的补充。在这种情况下,还要检查此更新对您的模型意味着什么。它还会奖励成功之前采取的步骤吗?

模型能够逐步达到目标很重要。例如,此示例的一个很好的奖励是时间的流逝和完美平衡的指标的组合。

我希望它有助于将您定向到正确的方向。

顺便说一句,与像监督学习方法那样构建的构建模块相比,强化学习具有很大的挑战性。它还有助于将它们检出。我真的可以推荐免费的Fastai课程,这是我在机器学习最重要领域中见过的最好的课程。

答案 1 :(得分:0)

我建议您查看ML-Agents github存储库中的Crawler example。您可能希望使用与Crawler相同的观察结果: “每个肢体的位置,旋转,速度和角速度加上身体的加速度和角加速度。”

我还建议您从Crawler中复制trainer_config.yaml设置,以使您有最佳的超参数起点。

作为旁注,RayPerceptionSensorComponent3D在某种程度上是独立工作的,您不需要使用AddVectorObs()将其添加到观察值中,因为它会在后台自动执行此操作。您也不需要再为RayPerception的“空间大小”添加任何值。就是说,您可能根本不想使用RayPerception来解决这个问题。

答案 2 :(得分:0)

当腰部接触地面时,为什么不尝试使用transform.positions设置奖励和结束情节(例如检查腰部与地面之间的碰撞)(或)检查腰部和其他部位是否处于相同的transform.position并据此进行奖励。

您也可以建立统一项目文件的链接。我想看看你在这里做了什么。

答案 3 :(得分:0)

虽然可能性不大,但您是否忘记在运行 mlagents-learn 命令时加上 --train 参数?我就被这个问题坑过好几次。

答案 4 :(得分:0)

对于 207 的观察空间,我强烈建议让您的代理训练超过 10k 步。 500 万或更多可能是合理的。真的,只要尝试更长时间的训练,比如通宵训练,看看是否会发生任何不同的事情。一次拥有多个机器学习机器人训练副本以加快训练速度也有很大帮助。尝试制作环境的多个副本。 (我建议将机器人和飞机放在一个空的游戏对象中,然后将其复制 8-16 次)您不必做任何特殊的事情,只需复制并正常运行,其他副本也将进行训练。