kengz/SLM-Lab
slm_lab/spec/benchmark/ppo/ppo_roboschool.json
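This file defines four PPO benchmark specs: ppo_roboschool, parametrized over eight Roboschool locomotion tasks via the "${env}" placeholder and "spec_params", plus three fixed-environment specs (ppo_humanoid, ppo_humanoidflagrun, ppo_humanoidflagrunharder) that raise num_envs and max_frame for the harder humanoid tasks. A usage sketch follows the JSON.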
{
  "ppo_roboschool": {
    "agent": [{
      "name": "PPO",
      "algorithm": {
        "name": "PPO",
        "action_pdtype": "default",
        "action_policy": "default",
        "explore_var_spec": null,
        "gamma": 0.99,
        "lam": 0.95,
        "clip_eps_spec": {
          "name": "no_decay",
          "start_val": 0.20,
          "end_val": 0.20,
          "start_step": 0,
          "end_step": 0
        },
        "entropy_coef_spec": {
          "name": "no_decay",
          "start_val": 0.0,
          "end_val": 0.0,
          "start_step": 0,
          "end_step": 0
        },
        "val_loss_coef": 1.0,
        "time_horizon": 2048,
        "minibatch_size": 128,
        "training_epoch": 10
      },
      "memory": {
        "name": "OnPolicyBatchReplay",
      },
      "net": {
        "type": "MLPNet",
        "shared": false,
        "hid_layers": [256, 256],
        "hid_layers_activation": "relu",
        "init_fn": "orthogonal_",
        "clip_grad_val": 0.5,
        "use_same_optim": false,
        "loss_spec": {
          "name": "MSELoss"
        },
        "actor_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "critic_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "lr_scheduler_spec": null,
        "gpu": false
      }
    }],
    "env": [{
      "name": "${env}",
      "num_envs": 8,
      "max_t": null,
      "max_frame": 2e6
    }],
    "body": {
      "product": "outer",
      "num": 1
    },
    "meta": {
      "distributed": false,
      "log_frequency": 1000,
      "eval_frequency": 1000,
      "rigorous_eval": 0,
      "max_session": 4,
      "max_trial": 1
    },
    "spec_params": {
      "env": [
        "RoboschoolAnt-v1", "RoboschoolAtlasForwardWalk-v1", "RoboschoolHalfCheetah-v1", "RoboschoolHopper-v1", "RoboschoolInvertedDoublePendulum-v1", "RoboschoolInvertedPendulum-v1", "RoboschoolReacher-v1", "RoboschoolWalker2d-v1"
      ]
    }
  },
  "ppo_humanoid": {
    "agent": [{
      "name": "PPO",
      "algorithm": {
        "name": "PPO",
        "action_pdtype": "default",
        "action_policy": "default",
        "explore_var_spec": null,
        "gamma": 0.99,
        "lam": 0.95,
        "clip_eps_spec": {
          "name": "no_decay",
          "start_val": 0.20,
          "end_val": 0.20,
          "start_step": 0,
          "end_step": 0
        },
        "entropy_coef_spec": {
          "name": "no_decay",
          "start_val": 0.0,
          "end_val": 0.0,
          "start_step": 0,
          "end_step": 0
        },
        "val_loss_coef": 1.0,
        "time_horizon": 512,
        "minibatch_size": 4096,
        "training_epoch": 15
      },
      "memory": {
        "name": "OnPolicyBatchReplay",
      },
      "net": {
        "type": "MLPNet",
        "shared": false,
        "hid_layers": [256, 256],
        "hid_layers_activation": "relu",
        "init_fn": "orthogonal_",
        "clip_grad_val": 0.5,
        "use_same_optim": false,
        "loss_spec": {
          "name": "MSELoss"
        },
        "actor_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "critic_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "lr_scheduler_spec": null,
        "gpu": false
      }
    }],
    "env": [{
      "name": "RoboschoolHumanoid-v1",
      "num_envs": 32,
      "max_t": null,
      "max_frame": 5e7
    }],
    "body": {
      "product": "outer",
      "num": 1
    },
    "meta": {
      "distributed": false,
      "log_frequency": 10000,
      "eval_frequency": 10000,
      "rigorous_eval": 0,
      "max_session": 4,
      "max_trial": 1
    }
  },
  "ppo_humanoidflagrun": {
    "agent": [{
      "name": "PPO",
      "algorithm": {
        "name": "PPO",
        "action_pdtype": "default",
        "action_policy": "default",
        "explore_var_spec": null,
        "gamma": 0.99,
        "lam": 0.95,
        "clip_eps_spec": {
          "name": "no_decay",
          "start_val": 0.20,
          "end_val": 0.20,
          "start_step": 0,
          "end_step": 0
        },
        "entropy_coef_spec": {
          "name": "no_decay",
          "start_val": 0.0,
          "end_val": 0.0,
          "start_step": 0,
          "end_step": 0
        },
        "val_loss_coef": 1.0,
        "time_horizon": 512,
        "minibatch_size": 4096,
        "training_epoch": 15
      },
      "memory": {
        "name": "OnPolicyBatchReplay",
      },
      "net": {
        "type": "MLPNet",
        "shared": false,
        "hid_layers": [256, 256],
        "hid_layers_activation": "relu",
        "init_fn": "orthogonal_",
        "clip_grad_val": 0.5,
        "use_same_optim": false,
        "loss_spec": {
          "name": "MSELoss"
        },
        "actor_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "critic_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "lr_scheduler_spec": null,
        "gpu": false
      }
    }],
    "env": [{
      "name": "RoboschoolHumanoidFlagrun-v1",
      "num_envs": 128,
      "max_t": null,
      "max_frame": 1e8
    }],
    "body": {
      "product": "outer",
      "num": 1
    },
    "meta": {
      "distributed": false,
      "log_frequency": 10000,
      "eval_frequency": 10000,
      "rigorous_eval": 0,
      "max_session": 4,
      "max_trial": 1
    }
  },
  "ppo_humanoidflagrunharder": {
    "agent": [{
      "name": "PPO",
      "algorithm": {
        "name": "PPO",
        "action_pdtype": "default",
        "action_policy": "default",
        "explore_var_spec": null,
        "gamma": 0.99,
        "lam": 0.95,
        "clip_eps_spec": {
          "name": "no_decay",
          "start_val": 0.20,
          "end_val": 0.20,
          "start_step": 0,
          "end_step": 0
        },
        "entropy_coef_spec": {
          "name": "no_decay",
          "start_val": 0.0,
          "end_val": 0.0,
          "start_step": 0,
          "end_step": 0
        },
        "val_loss_coef": 1.0,
        "time_horizon": 512,
        "minibatch_size": 4096,
        "training_epoch": 15
      },
      "memory": {
        "name": "OnPolicyBatchReplay",
      },
      "net": {
        "type": "MLPNet",
        "shared": false,
        "hid_layers": [256, 256],
        "hid_layers_activation": "relu",
        "init_fn": "orthogonal_",
        "clip_grad_val": 0.5,
        "use_same_optim": false,
        "loss_spec": {
          "name": "MSELoss"
        },
        "actor_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "critic_optim_spec": {
          "name": "Lookahead",
          "optimizer": "RAdam",
          "lr": 3e-4,
        },
        "lr_scheduler_spec": null,
        "gpu": false
      }
    }],
    "env": [{
      "name": "RoboschoolHumanoidFlagrunHarder-v1",
      "num_envs": 128,
      "max_t": null,
      "max_frame": 1e8
    }],
    "body": {
      "product": "outer",
      "num": 1
    },
    "meta": {
      "distributed": false,
      "log_frequency": 10000,
      "eval_frequency": 10000,
      "rigorous_eval": 0,
      "max_session": 4,
      "max_trial": 1
    }
  }
}
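
Usage note: assuming a standard SLM-Lab checkout, a spec from this file is launched by file path, spec name, and lab mode via run_lab.py, e.g.

python run_lab.py slm_lab/spec/benchmark/ppo/ppo_roboschool.json ppo_roboschool train

For ppo_roboschool, the "${env}" placeholder in the env name is filled in from the "spec_params" list, so the single spec expands into one concrete spec per Roboschool task. Below is a minimal Python sketch of that substitution idea; the helper expand_spec is hypothetical, not SLM-Lab's actual spec-resolution code:

import json
from string import Template

def expand_spec(spec):
    """Yield one concrete spec per spec_params value (hypothetical helper)."""
    params = spec.pop("spec_params", None)
    if params is None:
        yield spec  # nothing to substitute, e.g. ppo_humanoid
        return
    (key, values), = params.items()  # assumes a single variable, here "env"
    text = json.dumps(spec)  # serialize so "${env}" can be replaced textually
    for value in values:
        yield json.loads(Template(text).substitute({key: value}))

specs = json.load(open("slm_lab/spec/benchmark/ppo/ppo_roboschool.json"))
for concrete in expand_spec(specs["ppo_roboschool"]):
    print(concrete["env"][0]["name"])  # RoboschoolAnt-v1 ... RoboschoolWalker2d-v1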